//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));
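
// Illustrative usage note: because this is a module-level cl::opt, codegen
// drivers such as llc accept it on the command line; for example,
// -limit-float-precision=12 requests the 12-bit-accurate inline expansions
// wherever LimitFloatPrecision is consulted (6, 8, and 12 bits are the
// supported settings, per the comment above).
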
// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                const SDValue *Parts, unsigned NumParts,
                                MVT PartVT, EVT ValueVT, const Value *V,
                                Optional<ISD::NodeType> AssertOp = None) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
                                  PartVT, ValueVT, V);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
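      // Worked example (illustrative): assembling an i96 value from three i32
      // parts gives RoundParts = 2 and RoundBits = 64, so Parts[0..1] form a
      // power-of-2 i64 pair here and Parts[2] is folded in as the odd part
      // further below.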
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL,
                              Parts + RoundParts, OddParts, PartVT, OddVT, V);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }
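
  // For instance (illustrative): an i1 value that was returned in an i8 part
  // takes the truncate path above; an AssertZext from i1 records that the
  // upper bits are already zero, letting later zero-extensions of the result
  // fold away.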

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  llvm_unreachable("Unknown mismatch!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs =
    TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                               NumIntermediates, RegisterVT);
    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");
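
    // Example breakdown (illustrative): a <8 x i32> value on a target where
    // only <4 x i32> is legal yields NumIntermediates = 2 intermediate
    // <4 x i32> operands here, reassembled with CONCAT_VECTORS below.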

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, ValueVT, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element type of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
      "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle cases such as i8 -> <1 x i1>
  if (ValueVT.getVectorNumElements() != 1) {
    diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                      "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
    Val = ValueVT.isFloatingPoint() ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                                    : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                                 DAG.getIntPtrConstant(RoundBits, DL));
    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }
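
  // Worked example (illustrative): splitting an i96 value into three i32
  // parts takes the branch above with RoundParts = 2; OddVal = Val >> 64
  // fills Parts[2], and Val is truncated to i64 for the power-of-2 expansion
  // below.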

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);
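
  // Illustrative trace (assuming NumParts = 4): the first StepSize pass
  // extracts two halves of 2*PartBits each into Parts[0] and Parts[2]; the
  // second pass splits each half again, leaving Parts[0..3] in little-endian
  // order (reversed at the end for big-endian targets).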

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}


/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
               PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
      EVT ElementVT = PartVT.getVectorElementType();
      // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
      // undef elements.
      SmallVector<SDValue, 16> Ops;
      for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, ElementVT, Val,
            DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));

      for (unsigned i = ValueVT.getVectorNumElements(),
           e = PartVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getUNDEF(ElementVT));

      Val = DAG.getBuildVector(PartVT, DL, Ops);

      // FIXME: Use CONCAT for 2x -> 4x.

      //SDValue UndefElts = DAG.getUNDEF(VectorTy);
      //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                 ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      // Vector -> scalar conversion.
      assert(ValueVT.getVectorNumElements() == 1 &&
             "Only trivial vector-to-scalar conversions should get here!");
      Val = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
                                                IntermediateVT,
                                                NumIntermediates, RegisterVT);
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
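
  // Illustrative example: splitting a <4 x float> value into two <2 x float>
  // intermediates extracts subvectors at element indices 0 and 2 below.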

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector())
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getConstant(i * (NumElements / NumIntermediates), DL,
                                      TLI.getVectorIdxTy(DAG.getDataLayout())));
    else
      Ops[i] = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
          DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
  }
}

RegsForValue::RegsForValue() {}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs = TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT = TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    Reg += NumRegs;
  }
}

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
          !RegisterVT.isInteger() || RegisterVT.isVector())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.Zero.countLeadingOnes();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt = true;
      EVT FromVT(MVT::Other);
      if (NumSignBits == RegSize) {
        isSExt = true;   // ASSERT SEXT 1
        FromVT = MVT::i1;
      } else if (NumZeroBits >= RegSize - 1) {
        isSExt = false;  // ASSERT ZEXT 1
        FromVT = MVT::i1;
      } else if (NumSignBits > RegSize - 8) {
        isSExt = true;   // ASSERT SEXT 8
        FromVT = MVT::i8;
      } else if (NumZeroBits >= RegSize - 8) {
        isSExt = false;  // ASSERT ZEXT 8
        FromVT = MVT::i8;
      } else if (NumSignBits > RegSize - 16) {
        isSExt = true;   // ASSERT SEXT 16
        FromVT = MVT::i16;
      } else if (NumZeroBits >= RegSize - 16) {
        isSExt = false;  // ASSERT ZEXT 16
        FromVT = MVT::i16;
      } else if (NumSignBits > RegSize - 32) {
        isSExt = true;   // ASSERT SEXT 32
        FromVT = MVT::i32;
      } else if (NumZeroBits >= RegSize - 32) {
        isSExt = false;  // ASSERT ZEXT 32
        FromVT = MVT::i32;
      } else {
        continue;
      }
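      // For example (illustrative): a 64-bit virtual register known to have
      // 57 sign bits satisfies NumSignBits > RegSize - 8, so an AssertSext
      // from i8 is attached below; the value is the sign extension of its
      // low eight bits.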
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
                                     NumRegs, RegisterVT, ValueVT, V);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
                   &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() &&
           TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }
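
  // Sketch of the flag-word layout (an informal summary; InlineAsm.h is the
  // authoritative definition): getFlagWord packs the operand kind and the
  // register count into the low bits, and the helpers above add either the
  // matched-operand index or the register class ID in the upper bits.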

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));

      if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
        // If we clobbered the stack pointer, MFI should know about it.
        assert(DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment());
      }
    }
  }
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
                               const TargetLibraryInfo *li) {
  AA = &aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                             PendingLoads);
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                     PendingExports);
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (isa<TerminatorInst>(&I)) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (!isa<TerminatorInst>(&I) && !HasTailCall &&
      !isStatepoint(&I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
  if (DDI.getDI()) {
    const DbgValueInst *DI = DDI.getDI();
    DebugLoc dl = DDI.getdl();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    uint64_t Offset = DI->getOffset();
    SDDbgValue *SDV;
    if (Val.getNode()) {
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, Offset, false,
                                    Val)) {
        SDV = getDbgValue(Val, Variable, Expr, Offset, dl, DbgSDNodeOrder);
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      }
    } else
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    DanglingDebugInfoMap[V] = DanglingDebugInfo();
  }
}

/// getCopyFromRegs - If there was a virtual register allocated for the value
/// V, emit CopyFromReg of the specified type Ty. Return an empty SDValue()
/// otherwise.
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  SDValue Result;

  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;
    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                     DAG.getDataLayout(), InReg, Ty);
    SDValue Chain = DAG.getEntryNode();
    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
    resolveDanglingDebugInfo(V, Result);
  }

  return Result;
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
    return copyFromReg;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

// Return true if SDValue exists for the given Value
bool SelectionDAGBuilder::findValue(const Value *V) const {
  return (NodeMap.find(V) != NodeMap.end()) ||
    (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) {
    if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
      // Remove the debug location from the node as the node is about to be used
      // in a location which may differ from the original debug location.  This
      // is relevant to Constant and ConstantFP nodes because they can appear
      // as constant expressions inside PHI nodes.
      N->setDebugLoc(DebugLoc());
    }
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, getCurSDLoc(), VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, getCurSDLoc(),
                             TLI.getPointerTy(DAG.getDataLayout(), AS));
    }

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const ConstantDataSequential *CDS =
          dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Ops.push_back(SDValue(Val, i));
      }

      if (isa<ArrayType>(CDS->getType()))
        return DAG.getMergeValues(Ops, getCurSDLoc());
      return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
    }

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
        else
          Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
      return DAG.getBlockAddress(BA, VT);

    VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CV->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      EVT EltVT =
          TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
      else
        Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second,
                               TLI.getFrameIndexTy(DAG.getDataLayout()));
  }

  // If this is an instruction which fast-isel has deferred, select it now.
  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
    RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
                     Inst->getType());
    SDValue Chain = DAG.getEntryNode();
    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
  }

  llvm_unreachable("Can't get register for value!");
}

void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
  MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
  // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
  if (IsMSVCCXX || IsCoreCLR)
    CatchPadMBB->setIsEHFuncletEntry();

  DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other, getControlRoot()));
}

void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
  // Update machine-CFG edge.
  MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
  FuncInfo.MBB->addSuccessor(TargetMBB);

  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  if (IsSEH) {
    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (TargetMBB != NextBlock(FuncInfo.MBB) ||
        TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(TargetMBB)));
    return;
  }

  // Figure out the funclet membership for the catchret's successor.
  // This will be used by the FuncletLayout pass to determine how to order the
  // BB's.
  // A 'catchret' returns to the outer scope's color.
  Value *ParentPad = I.getCatchSwitchParentPad();
  const BasicBlock *SuccessorColor;
  if (isa<ConstantTokenNone>(ParentPad))
    SuccessorColor = &FuncInfo.Fn->getEntryBlock();
  else
    SuccessorColor = cast<Instruction>(ParentPad)->getParent();
  assert(SuccessorColor && "No parent funclet for catchret!");
  MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");

  // Create the terminator node.
  SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
                            getControlRoot(), DAG.getBasicBlock(TargetMBB),
                            DAG.getBasicBlock(SuccessorColorMBB));
  DAG.setRoot(Ret);
}

void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
  // Don't emit any special code for the cleanuppad instruction. It just marks
  // the start of a funclet.
  FuncInfo.MBB->setIsEHFuncletEntry();
  FuncInfo.MBB->setIsCleanupFuncletEntry();
}

/// When an invoke or a cleanupret unwinds to the next EH pad, there are
/// many places it could ultimately go. In the IR, we have a single unwind
/// destination, but in the machine CFG, we enumerate all the possible blocks.
/// This function skips over imaginary basic blocks that hold catchswitch
/// instructions, and finds all the "real" machine
/// basic block destinations. As those destinations may not be successors of
/// EHPadBB, here we also calculate the edge probability to those destinations.
/// The passed-in Prob is the edge probability to EHPadBB.
static void findUnwindDestinations(
    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality =
    classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;

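  // Illustrative walk: an invoke unwinding to a catchswitch with two catchpad
  // handlers records both handler MBBs below, then continues to the
  // catchswitch's own unwind destination, scaling Prob by each traversed edge
  // probability when BPI is available.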
1229   while (EHPadBB) {
1230     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1231     BasicBlock *NewEHPadBB = nullptr;
1232     if (isa<LandingPadInst>(Pad)) {
1233       // Stop on landingpads. They are not funclets.
1234       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1235       break;
1236     } else if (isa<CleanupPadInst>(Pad)) {
1237       // Stop on cleanup pads. Cleanups are always funclet entries for all known
1238       // personalities.
1239       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1240       UnwindDests.back().first->setIsEHFuncletEntry();
1241       break;
1242     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1243       // Add the catchpad handlers to the possible destinations.
1244       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1245         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1246         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1247         if (IsMSVCCXX || IsCoreCLR)
1248           UnwindDests.back().first->setIsEHFuncletEntry();
1249       }
1250       NewEHPadBB = CatchSwitch->getUnwindDest();
1251     } else {
1252       continue;
1253     }
1254 
1255     BranchProbabilityInfo *BPI = FuncInfo.BPI;
1256     if (BPI && NewEHPadBB)
1257       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1258     EHPadBB = NewEHPadBB;
1259   }
1260 }
1261 
1262 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1263   // Update successor info.
1264   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1265   auto UnwindDest = I.getUnwindDest();
1266   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1267   BranchProbability UnwindDestProb =
1268       (BPI && UnwindDest)
1269           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1270           : BranchProbability::getZero();
1271   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1272   for (auto &UnwindDest : UnwindDests) {
1273     UnwindDest.first->setIsEHPad();
1274     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1275   }
1276   FuncInfo.MBB->normalizeSuccProbs();
1277 
1278   // Create the terminator node.
1279   SDValue Ret =
1280       DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1281   DAG.setRoot(Ret);
1282 }
1283 
1284 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1285   report_fatal_error("visitCatchSwitch not yet implemented!");
1286 }
1287 
1288 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1289   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1290   auto &DL = DAG.getDataLayout();
1291   SDValue Chain = getControlRoot();
1292   SmallVector<ISD::OutputArg, 8> Outs;
1293   SmallVector<SDValue, 8> OutVals;
1294 
1295   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1296   // we lower
1297   //
1298   //   %val = call <ty> @llvm.experimental.deoptimize()
1299   //   ret <ty> %val
1300   //
1301   // differently.
1302   if (I.getParent()->getTerminatingDeoptimizeCall()) {
1303     LowerDeoptimizingReturn();
1304     return;
1305   }
1306 
1307   if (!FuncInfo.CanLowerReturn) {
1308     unsigned DemoteReg = FuncInfo.DemoteRegister;
1309     const Function *F = I.getParent()->getParent();
1310 
1311     // Emit a store of the return value through the virtual register.
1312     // Leave Outs empty so that LowerReturn won't try to load return
1313     // registers the usual way.
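         // (A sketch of the resulting layout, assuming a typical 64-bit data
         // layout: for a {i32, i64} return value, ComputeValueVTs below yields
         // offsets 0 and 8, so two stores are emitted through the demoted
         // return pointer.)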
1314     SmallVector<EVT, 1> PtrValueVTs;
1315     ComputeValueVTs(TLI, DL, PointerType::getUnqual(F->getReturnType()),
1316                     PtrValueVTs);
1317 
1318     SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1319                                         DemoteReg, PtrValueVTs[0]);
1320     SDValue RetOp = getValue(I.getOperand(0));
1321 
1322     SmallVector<EVT, 4> ValueVTs;
1323     SmallVector<uint64_t, 4> Offsets;
1324     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
1325     unsigned NumValues = ValueVTs.size();
1326 
1327     // An aggregate return value cannot wrap around the address space, so
1328     // offsets to its parts don't wrap either.
1329     SDNodeFlags Flags;
1330     Flags.setNoUnsignedWrap(true);
1331 
1332     SmallVector<SDValue, 4> Chains(NumValues);
1333     for (unsigned i = 0; i != NumValues; ++i) {
1334       SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(),
1335                                 RetPtr.getValueType(), RetPtr,
1336                                 DAG.getIntPtrConstant(Offsets[i],
1337                                                       getCurSDLoc()),
1338                                 Flags);
1339       Chains[i] = DAG.getStore(Chain, getCurSDLoc(),
1340                                SDValue(RetOp.getNode(), RetOp.getResNo() + i),
1341                                // FIXME: better loc info would be nice.
1342                                Add, MachinePointerInfo());
1343     }
1344 
1345     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1346                         MVT::Other, Chains);
1347   } else if (I.getNumOperands() != 0) {
1348     SmallVector<EVT, 4> ValueVTs;
1349     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1350     unsigned NumValues = ValueVTs.size();
1351     if (NumValues) {
1352       SDValue RetOp = getValue(I.getOperand(0));
1353 
1354       const Function *F = I.getParent()->getParent();
1355 
1356       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1357       if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1358                                           Attribute::SExt))
1359         ExtendKind = ISD::SIGN_EXTEND;
1360       else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1361                                                Attribute::ZExt))
1362         ExtendKind = ISD::ZERO_EXTEND;
1363 
1364       LLVMContext &Context = F->getContext();
1365       bool RetInReg = F->getAttributes().hasAttribute(
1366           AttributeList::ReturnIndex, Attribute::InReg);
1367 
1368       for (unsigned j = 0; j != NumValues; ++j) {
1369         EVT VT = ValueVTs[j];
1370 
1371         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1372           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1373 
1374         unsigned NumParts = TLI.getNumRegisters(Context, VT);
1375         MVT PartVT = TLI.getRegisterType(Context, VT);
1376         SmallVector<SDValue, 4> Parts(NumParts);
1377         getCopyToParts(DAG, getCurSDLoc(),
1378                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1379                        &Parts[0], NumParts, PartVT, &I, ExtendKind);
1380 
1381         // The 'inreg' attribute on the function refers to the return value.
1382         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1383         if (RetInReg)
1384           Flags.setInReg();
1385 
1386         // Propagate extension type if any
1387         if (ExtendKind == ISD::SIGN_EXTEND)
1388           Flags.setSExt();
1389         else if (ExtendKind == ISD::ZERO_EXTEND)
1390           Flags.setZExt();
1391 
1392         for (unsigned i = 0; i < NumParts; ++i) {
1393           Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1394                                         VT, /*isfixed=*/true, 0, 0));
1395           OutVals.push_back(Parts[i]);
1396         }
1397       }
1398     }
1399   }
1400 
1401   // Push the swifterror virtual register as the last element of Outs. This
1402   // makes sure the swifterror virtual register will be returned in the
1403   // swifterror physical register.
1404   const Function *F = I.getParent()->getParent();
1405   if (TLI.supportSwiftError() &&
1406       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1407     assert(FuncInfo.SwiftErrorArg && "Need a swift error argument");
1408     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1409     Flags.setSwiftError();
1410     Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1411                                   EVT(TLI.getPointerTy(DL)) /*argvt*/,
1412                                   true /*isfixed*/, 1 /*origidx*/,
1413                                   0 /*partOffs*/));
1414     // Create SDNode for the swifterror virtual register.
1415     OutVals.push_back(DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVReg(
1416                                           FuncInfo.MBB, FuncInfo.SwiftErrorArg),
1417                                       EVT(TLI.getPointerTy(DL))));
1418   }
1419 
1420   bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
1421   CallingConv::ID CallConv =
1422     DAG.getMachineFunction().getFunction()->getCallingConv();
1423   Chain = DAG.getTargetLoweringInfo().LowerReturn(
1424       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1425 
1426   // Verify that the target's LowerReturn behaved as expected.
1427   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1428          "LowerReturn didn't return a valid chain!");
1429 
1430   // Update the DAG with the new chain value resulting from return lowering.
1431   DAG.setRoot(Chain);
1432 }
1433 
1434 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1435 /// created for it, emit nodes to copy the value into the virtual
1436 /// registers.
1437 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1438   // Skip empty types
1439   if (V->getType()->isEmptyTy())
1440     return;
1441 
1442   DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1443   if (VMI != FuncInfo.ValueMap.end()) {
1444     assert(!V->use_empty() && "Unused value assigned virtual registers!");
1445     CopyValueToVirtualRegister(V, VMI->second);
1446   }
1447 }
1448 
1449 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1450 /// the current basic block, add it to ValueMap now so that we'll get a
1451 /// CopyTo/FromReg.
1452 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1453   // No need to export constants.
1454   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1455 
1456   // Already exported?
1457   if (FuncInfo.isExportedInst(V)) return;
1458 
1459   unsigned Reg = FuncInfo.InitializeRegForValue(V);
1460   CopyValueToVirtualRegister(V, Reg);
1461 }
1462 
1463 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1464                                                      const BasicBlock *FromBB) {
1465   // The operands of the setcc have to be in this block.  We don't know
1466   // how to export them from some other block.
1467   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1468     // Can export from current BB.
1469     if (VI->getParent() == FromBB)
1470       return true;
1471 
1472     // Is already exported, noop.
1473     return FuncInfo.isExportedInst(V);
1474   }
1475 
1476   // If this is an argument, we can export it if the BB is the entry block or
1477   // if it is already exported.
1478   if (isa<Argument>(V)) {
1479     if (FromBB == &FromBB->getParent()->getEntryBlock())
1480       return true;
1481 
1482     // Otherwise, can only export this if it is already exported.
1483     return FuncInfo.isExportedInst(V);
1484   }
1485 
1486   // Otherwise, constants can always be exported.
1487   return true;
1488 }
1489 
1490 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
1491 BranchProbability
1492 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
1493                                         const MachineBasicBlock *Dst) const {
1494   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1495   const BasicBlock *SrcBB = Src->getBasicBlock();
1496   const BasicBlock *DstBB = Dst->getBasicBlock();
1497   if (!BPI) {
1498     // If BPI is not available, set the default probability as 1 / N, where N is
1499     // the number of successors.
1500     auto SuccSize = std::max<uint32_t>(
1501         std::distance(succ_begin(SrcBB), succ_end(SrcBB)), 1);
1502     return BranchProbability(1, SuccSize);
1503   }
1504   return BPI->getEdgeProbability(SrcBB, DstBB);
1505 }
1506 
1507 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
1508                                                MachineBasicBlock *Dst,
1509                                                BranchProbability Prob) {
1510   if (!FuncInfo.BPI)
1511     Src->addSuccessorWithoutProb(Dst);
1512   else {
1513     if (Prob.isUnknown())
1514       Prob = getEdgeProbability(Src, Dst);
1515     Src->addSuccessor(Dst, Prob);
1516   }
1517 }
1518 
1519 static bool InBlock(const Value *V, const BasicBlock *BB) {
1520   if (const Instruction *I = dyn_cast<Instruction>(V))
1521     return I->getParent() == BB;
1522   return true;
1523 }
1524 
1525 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1526 /// This function emits a branch and is used at the leaves of an OR or an
1527 /// AND operator tree.
1528 ///
1529 void
1530 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
1531                                                   MachineBasicBlock *TBB,
1532                                                   MachineBasicBlock *FBB,
1533                                                   MachineBasicBlock *CurBB,
1534                                                   MachineBasicBlock *SwitchBB,
1535                                                   BranchProbability TProb,
1536                                                   BranchProbability FProb,
1537                                                   bool InvertCond) {
1538   const BasicBlock *BB = CurBB->getBasicBlock();
1539 
1540   // If the leaf of the tree is a comparison, merge the condition into
1541   // the caseblock.
1542   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1543     // The operands of the cmp have to be in this block.  We don't know
1544     // how to export them from some other block.  If this is the first block
1545     // of the sequence, no exporting is needed.
1546     if (CurBB == SwitchBB ||
1547         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1548          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1549       ISD::CondCode Condition;
1550       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1551         ICmpInst::Predicate Pred =
1552             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
1553         Condition = getICmpCondCode(Pred);
1554       } else {
1555         const FCmpInst *FC = cast<FCmpInst>(Cond);
1556         FCmpInst::Predicate Pred =
1557             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
1558         Condition = getFCmpCondCode(Pred);
1559         if (TM.Options.NoNaNsFPMath)
1560           Condition = getFCmpCodeWithoutNaN(Condition);
1561       }
1562 
1563       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
1564                    TBB, FBB, CurBB, TProb, FProb);
1565       SwitchCases.push_back(CB);
1566       return;
1567     }
1568   }
1569 
1570   // Create a CaseBlock record representing this branch.
1571   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
1572   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
1573                nullptr, TBB, FBB, CurBB, TProb, FProb);
1574   SwitchCases.push_back(CB);
1575 }
1576 
1577 /// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
     /// recursively emit branches for the leaf conditions rather than
     /// materializing the boolean value with setcc and and/or nodes.
1578 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
1579                                                MachineBasicBlock *TBB,
1580                                                MachineBasicBlock *FBB,
1581                                                MachineBasicBlock *CurBB,
1582                                                MachineBasicBlock *SwitchBB,
1583                                                Instruction::BinaryOps Opc,
1584                                                BranchProbability TProb,
1585                                                BranchProbability FProb,
1586                                                bool InvertCond) {
1587   // If Cond is a 'not' that is not part of the tree, skip over it and
1588   // remember to invert the op and operands at the next level.
1589   if (BinaryOperator::isNot(Cond) && Cond->hasOneUse()) {
1590     const Value *CondOp = BinaryOperator::getNotArgument(Cond);
1591     if (InBlock(CondOp, CurBB->getBasicBlock())) {
1592       FindMergedConditions(CondOp, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
1593                            !InvertCond);
1594       return;
1595     }
1596   }
1597 
1598   const Instruction *BOp = dyn_cast<Instruction>(Cond);
1599   // Compute the effective opcode for Cond, taking into account whether it needs
1600   // to be inverted, e.g.
1601   //   and (not (or A, B)), C
1602   // gets lowered as
1603   //   and (and (not A, not B), C)
1604   unsigned BOpc = 0;
1605   if (BOp) {
1606     BOpc = BOp->getOpcode();
1607     if (InvertCond) {
1608       if (BOpc == Instruction::And)
1609         BOpc = Instruction::Or;
1610       else if (BOpc == Instruction::Or)
1611         BOpc = Instruction::And;
1612     }
1613   }
1614 
1615   // If this node is not part of the or/and tree, emit it as a branch.
1616   if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1617       BOpc != Opc || !BOp->hasOneUse() ||
1618       BOp->getParent() != CurBB->getBasicBlock() ||
1619       !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1620       !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1621     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
1622                                  TProb, FProb, InvertCond);
1623     return;
1624   }
1625 
1626   //  Create TmpBB after CurBB.
1627   MachineFunction::iterator BBI(CurBB);
1628   MachineFunction &MF = DAG.getMachineFunction();
1629   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1630   CurBB->getParent()->insert(++BBI, TmpBB);
1631 
1632   if (Opc == Instruction::Or) {
1633     // Codegen X | Y as:
1634     // BB1:
1635     //   jmp_if_X TBB
1636     //   jmp TmpBB
1637     // TmpBB:
1638     //   jmp_if_Y TBB
1639     //   jmp FBB
1640     //
1641 
1642     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1643     // The requirement is that
1644     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
1645     //     = TrueProb for original BB.
1646     // Assuming the original probabilities are A and B, one choice is to set
1647     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
1648     // A/(1+B) and 2B/(1+B). This choice assumes that
1649     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
1650     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
1651     // TmpBB, but the math is more complicated.
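         // As a worked example: with A = 1/2 and B = 1/2, BB1 gets
         // probabilities (1/4, 3/4) and TmpBB gets (1/3, 2/3), and indeed
         //   1/4 + 3/4 * 1/3 = 1/2 = TrueProb for the original BB.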
1652 
1653     auto NewTrueProb = TProb / 2;
1654     auto NewFalseProb = TProb / 2 + FProb;
1655     // Emit the LHS condition.
1656     FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
1657                          NewTrueProb, NewFalseProb, InvertCond);
1658 
1659     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
1660     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
1661     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1662     // Emit the RHS condition into TmpBB.
1663     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1664                          Probs[0], Probs[1], InvertCond);
1665   } else {
1666     assert(Opc == Instruction::And && "Unknown merge op!");
1667     // Codegen X & Y as:
1668     // BB1:
1669     //   jmp_if_X TmpBB
1670     //   jmp FBB
1671     // TmpBB:
1672     //   jmp_if_Y TBB
1673     //   jmp FBB
1674     //
1675     //  This requires creation of TmpBB after CurBB.
1676 
1677     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1678     // The requirement is that
1679     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
1680     //     = FalseProb for original BB.
1681     // Assuming the original probabilities are A and B, one choice is to set
1682     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
1683     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
1684     // TrueProb for BB1 * FalseProb for TmpBB.
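         // As a worked example: with A = 3/4 and B = 1/4, BB1 gets
         // probabilities (7/8, 1/8) and TmpBB gets (6/7, 1/7), and indeed
         //   1/8 + 7/8 * 1/7 = 1/4 = FalseProb for the original BB.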
1685 
1686     auto NewTrueProb = TProb + FProb / 2;
1687     auto NewFalseProb = FProb / 2;
1688     // Emit the LHS condition.
1689     FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
1690                          NewTrueProb, NewFalseProb, InvertCond);
1691 
1692     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
1693     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
1694     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1695     // Emit the RHS condition into TmpBB.
1696     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1697                          Probs[0], Probs[1], InvertCond);
1698   }
1699 }
1700 
1701 /// If the set of cases should be emitted as a series of branches, return true.
1702 /// If we should emit this as a bunch of and/or'd together conditions, return
1703 /// false.
1704 bool
1705 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
1706   if (Cases.size() != 2) return true;
1707 
1708   // If this is two comparisons of the same values or'd or and'd together, they
1709   // will get folded into a single comparison, so don't emit two blocks.
1710   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1711        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1712       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1713        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1714     return false;
1715   }
1716 
1717   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
1718   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
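       // For example, 'br (or (icmp ne X, null), (icmp ne Y, null))' yields two
       // CaseBlocks comparing against the same null constant; a single
       // (X|Y) != 0 test covers both, so emitting two branch blocks here would
       // be wasted work.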
1719   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
1720       Cases[0].CC == Cases[1].CC &&
1721       isa<Constant>(Cases[0].CmpRHS) &&
1722       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
1723     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
1724       return false;
1725     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
1726       return false;
1727   }
1728 
1729   return true;
1730 }
1731 
1732 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
1733   MachineBasicBlock *BrMBB = FuncInfo.MBB;
1734 
1735   // Update machine-CFG edges.
1736   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1737 
1738   if (I.isUnconditional()) {
1739     // Update machine-CFG edges.
1740     BrMBB->addSuccessor(Succ0MBB);
1741 
1742     // If this is not a fall-through branch or optimizations are switched off,
1743     // emit the branch.
1744     if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
1745       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
1746                               MVT::Other, getControlRoot(),
1747                               DAG.getBasicBlock(Succ0MBB)));
1748 
1749     return;
1750   }
1751 
1752   // If this condition is one of the special cases we handle, emit the branch
1753   // sequence for it now.
1754   const Value *CondVal = I.getCondition();
1755   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1756 
1757   // If this is a series of conditions that are or'd or and'd together, emit
1758   // this as a sequence of branches instead of setcc's with and/or operations.
1759   // As long as jumps are not expensive, this should improve performance.
1760   // For example, instead of something like:
1761   //     cmp A, B
1762   //     C = seteq
1763   //     cmp D, E
1764   //     F = setle
1765   //     or C, F
1766   //     jnz foo
1767   // Emit:
1768   //     cmp A, B
1769   //     je foo
1770   //     cmp D, E
1771   //     jle foo
1772   //
1773   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1774     Instruction::BinaryOps Opcode = BOp->getOpcode();
1775     if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
1776         !I.getMetadata(LLVMContext::MD_unpredictable) &&
1777         (Opcode == Instruction::And || Opcode == Instruction::Or)) {
1778       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
1779                            Opcode,
1780                            getEdgeProbability(BrMBB, Succ0MBB),
1781                            getEdgeProbability(BrMBB, Succ1MBB),
1782                            /*InvertCond=*/false);
1783       // If the compares in later blocks need to use values not currently
1784       // exported from this block, export them now.  This block should always
1785       // be the first entry.
1786       assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
1787 
1788       // Allow some cases to be rejected.
1789       if (ShouldEmitAsBranches(SwitchCases)) {
1790         for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1791           ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1792           ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1793         }
1794 
1795         // Emit the branch for this block.
1796         visitSwitchCase(SwitchCases[0], BrMBB);
1797         SwitchCases.erase(SwitchCases.begin());
1798         return;
1799       }
1800 
1801       // Okay, we decided not to do this; remove any inserted MBBs and clear
1802       // SwitchCases.
1803       for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1804         FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1805 
1806       SwitchCases.clear();
1807     }
1808   }
1809 
1810   // Create a CaseBlock record representing this branch.
1811   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1812                nullptr, Succ0MBB, Succ1MBB, BrMBB);
1813 
1814   // Use visitSwitchCase to actually insert the fast branch sequence for this
1815   // cond branch.
1816   visitSwitchCase(CB, BrMBB);
1817 }
1818 
1819 /// visitSwitchCase - Emits the necessary code to represent a single node in
1820 /// the binary search tree resulting from lowering a switch instruction.
1821 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
1822                                           MachineBasicBlock *SwitchBB) {
1823   SDValue Cond;
1824   SDValue CondLHS = getValue(CB.CmpLHS);
1825   SDLoc dl = getCurSDLoc();
1826 
1827   // Build the setcc now.
1828   if (!CB.CmpMHS) {
1829     // Fold "(X == true)" to X and "(X == false)" to !X to
1830     // handle common cases produced by branch lowering.
1831     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1832         CB.CC == ISD::SETEQ)
1833       Cond = CondLHS;
1834     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1835              CB.CC == ISD::SETEQ) {
1836       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
1837       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1838     } else
1839       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1840   } else {
1841     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
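         // A range check Low <= X <= High is emitted as one unsigned compare:
         //   (X - Low) <=u (High - Low)
         // e.g. the range [-4, 4] becomes (X + 4) <=u 8. When Low is the
         // smallest signed value, the subtract is unnecessary and a signed
         // X <=s High suffices.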
1842 
1843     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1844     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
1845 
1846     SDValue CmpOp = getValue(CB.CmpMHS);
1847     EVT VT = CmpOp.getValueType();
1848 
1849     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1850       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
1851                           ISD::SETLE);
1852     } else {
1853       SDValue SUB = DAG.getNode(ISD::SUB, dl,
1854                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
1855       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1856                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
1857     }
1858   }
1859 
1860   // Update successor info
1861   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
1862   // TrueBB and FalseBB are always different unless the incoming IR is
1863   // degenerate. This only happens when running llc on weird IR.
1864   if (CB.TrueBB != CB.FalseBB)
1865     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
1866   SwitchBB->normalizeSuccProbs();
1867 
1868   // If the lhs block is the next block, invert the condition so that we can
1869   // fall through to the lhs instead of the rhs block.
1870   if (CB.TrueBB == NextBlock(SwitchBB)) {
1871     std::swap(CB.TrueBB, CB.FalseBB);
1872     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
1873     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1874   }
1875 
1876   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1877                                MVT::Other, getControlRoot(), Cond,
1878                                DAG.getBasicBlock(CB.TrueBB));
1879 
1880   // Insert the false branch. Do this even if it's a fall-through branch;
1881   // this makes it easier to do DAG optimizations which require inverting
1882   // the branch condition.
1883   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1884                        DAG.getBasicBlock(CB.FalseBB));
1885 
1886   DAG.setRoot(BrCond);
1887 }
1888 
1889 /// visitJumpTable - Emit JumpTable node in the current MBB
1890 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1891   // Emit the code for the jump table
1892   assert(JT.Reg != -1U && "Should lower JT Header first!");
1893   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
1894   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
1895                                      JT.Reg, PTy);
1896   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1897   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
1898                                     MVT::Other, Index.getValue(1),
1899                                     Table, Index);
1900   DAG.setRoot(BrJumpTable);
1901 }
1902 
1903 /// visitJumpTableHeader - This function emits the necessary code to produce an
1904 /// index into the jump table from the value being switched on.
1905 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1906                                                JumpTableHeader &JTH,
1907                                                MachineBasicBlock *SwitchBB) {
1908   SDLoc dl = getCurSDLoc();
1909 
1910   // Subtract the lowest switch case value from the value being switched on and
1911   // conditional branch to default mbb if the result is greater than the
1912   // difference between smallest and largest cases.
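       // For example, for cases 10..14 this emits Sub = X - 10 and branches to
       // the default block when Sub >u 4; otherwise Sub becomes the table
       // index.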
1913   SDValue SwitchOp = getValue(JTH.SValue);
1914   EVT VT = SwitchOp.getValueType();
1915   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
1916                             DAG.getConstant(JTH.First, dl, VT));
1917 
1918   // The SDNode we just created, which holds the value being switched on minus
1919   // the smallest case value, needs to be copied to a virtual register so it
1920   // can be used as an index into the jump table in a subsequent basic block.
1921   // This value may be smaller or larger than the target's pointer type, and
1922   // may therefore require extension or truncation.
1923   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1924   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
1925 
1926   unsigned JumpTableReg =
1927       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
1928   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
1929                                     JumpTableReg, SwitchOp);
1930   JT.Reg = JumpTableReg;
1931 
1932   // Emit the range check for the jump table, and branch to the default block
1933   // for the switch statement if the value being switched on exceeds the largest
1934   // case in the switch.
1935   SDValue CMP = DAG.getSetCC(
1936       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
1937                                  Sub.getValueType()),
1938       Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
1939 
1940   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1941                                MVT::Other, CopyTo, CMP,
1942                                DAG.getBasicBlock(JT.Default));
1943 
1944   // Avoid emitting unnecessary branches to the next block.
1945   if (JT.MBB != NextBlock(SwitchBB))
1946     BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1947                          DAG.getBasicBlock(JT.MBB));
1948 
1949   DAG.setRoot(BrCond);
1950 }
1951 
1952 /// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
1953 /// variable if one exists.
1954 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
1955                                  SDValue &Chain) {
1956   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1957   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
1958   MachineFunction &MF = DAG.getMachineFunction();
1959   Value *Global = TLI.getSDagStackGuard(*MF.getFunction()->getParent());
1960   MachineSDNode *Node =
1961       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
1962   if (Global) {
1963     MachinePointerInfo MPInfo(Global);
1964     MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
1965     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1966                  MachineMemOperand::MODereferenceable;
1967     *MemRefs = MF.getMachineMemOperand(MPInfo, Flags, PtrTy.getSizeInBits() / 8,
1968                                        DAG.getEVTAlignment(PtrTy));
1969     Node->setMemRefs(MemRefs, MemRefs + 1);
1970   }
1971   return SDValue(Node, 0);
1972 }
1973 
1974 /// Codegen a new tail for a stack protector check ParentMBB which has had its
1975 /// tail spliced into a stack protector check success bb.
1976 ///
1977 /// For a high level explanation of how this fits into the stack protector
1978 /// generation see the comment on the declaration of class
1979 /// StackProtectorDescriptor.
1980 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
1981                                                   MachineBasicBlock *ParentBB) {
1982 
1983   // First create the loads to the guard/stack slot for the comparison.
1984   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1985   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
1986 
1987   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
1988   int FI = MFI.getStackProtectorIndex();
1989 
1990   SDValue Guard;
1991   SDLoc dl = getCurSDLoc();
1992   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
1993   const Module &M = *ParentBB->getParent()->getFunction()->getParent();
1994   unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
1995 
1996   // Generate code to load the content of the guard slot.
1997   SDValue StackSlot = DAG.getLoad(
1998       PtrTy, dl, DAG.getEntryNode(), StackSlotPtr,
1999       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2000       MachineMemOperand::MOVolatile);
2001 
2002   // Retrieve guard check function, nullptr if instrumentation is inlined.
2003   if (const Value *GuardCheck = TLI.getSSPStackGuardCheck(M)) {
2004     // The target provides a guard check function to validate the guard value.
2005     // Generate a call to that function with the content of the guard slot as
2006     // argument.
2007     auto *Fn = cast<Function>(GuardCheck);
2008     FunctionType *FnTy = Fn->getFunctionType();
2009     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2010 
2011     TargetLowering::ArgListTy Args;
2012     TargetLowering::ArgListEntry Entry;
2013     Entry.Node = StackSlot;
2014     Entry.Ty = FnTy->getParamType(0);
2015     if (Fn->hasAttribute(1, Attribute::AttrKind::InReg))
2016       Entry.IsInReg = true;
2017     Args.push_back(Entry);
2018 
2019     TargetLowering::CallLoweringInfo CLI(DAG);
2020     CLI.setDebugLoc(getCurSDLoc())
2021       .setChain(DAG.getEntryNode())
2022       .setCallee(Fn->getCallingConv(), FnTy->getReturnType(),
2023                  getValue(GuardCheck), std::move(Args));
2024 
2025     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2026     DAG.setRoot(Result.second);
2027     return;
2028   }
2029 
2030   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2031   // Otherwise, emit a volatile load to retrieve the stack guard value.
2032   SDValue Chain = DAG.getEntryNode();
2033   if (TLI.useLoadStackGuardNode()) {
2034     Guard = getLoadStackGuard(DAG, dl, Chain);
2035   } else {
2036     const Value *IRGuard = TLI.getSDagStackGuard(M);
2037     SDValue GuardPtr = getValue(IRGuard);
2038 
2039     Guard =
2040         DAG.getLoad(PtrTy, dl, Chain, GuardPtr, MachinePointerInfo(IRGuard, 0),
2041                     Align, MachineMemOperand::MOVolatile);
2042   }
2043 
2044   // Perform the comparison via a subtract/getsetcc.
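       // That is, the nodes emitted below behave like:
       //   if ((Guard - StackSlot) != 0) goto FailureMBB; else goto SuccessMBB;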
2045   EVT VT = Guard.getValueType();
2046   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, StackSlot);
2047 
2048   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2049                                                         *DAG.getContext(),
2050                                                         Sub.getValueType()),
2051                              Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
2052 
2053   // If the sub is not 0, then we know the guard and the stack slot are not
2054   // equal, so branch to the failure MBB.
2055   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2056                                MVT::Other, StackSlot.getOperand(0),
2057                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2058   // Otherwise branch to success MBB.
2059   SDValue Br = DAG.getNode(ISD::BR, dl,
2060                            MVT::Other, BrCond,
2061                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2062 
2063   DAG.setRoot(Br);
2064 }
2065 
2066 /// Codegen the failure basic block for a stack protector check.
2067 ///
2068 /// A failure stack protector machine basic block consists simply of a call to
2069 /// __stack_chk_fail().
2070 ///
2071 /// For a high level explanation of how this fits into the stack protector
2072 /// generation see the comment on the declaration of class
2073 /// StackProtectorDescriptor.
2074 void
2075 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2076   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2077   SDValue Chain =
2078       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2079                       None, false, getCurSDLoc(), false, false).second;
2080   DAG.setRoot(Chain);
2081 }
2082 
2083 /// visitBitTestHeader - This function emits the necessary code to produce a
2084 /// value suitable for "bit tests".
2085 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2086                                              MachineBasicBlock *SwitchBB) {
2087   SDLoc dl = getCurSDLoc();
2088 
2089   // Subtract the minimum value
2090   SDValue SwitchOp = getValue(B.SValue);
2091   EVT VT = SwitchOp.getValueType();
2092   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2093                             DAG.getConstant(B.First, dl, VT));
2094 
2095   // Check range
2096   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2097   SDValue RangeCmp = DAG.getSetCC(
2098       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2099                                  Sub.getValueType()),
2100       Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
2101 
2102   // Determine the type of the test operands.
2103   bool UsePtrType = false;
2104   if (!TLI.isTypeLegal(VT))
2105     UsePtrType = true;
2106   else {
2107     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2108       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2109         // Switch table case ranges are encoded into a series of masks.
2110         // Just use the pointer type; it's guaranteed to fit.
2111         UsePtrType = true;
2112         break;
2113       }
2114   }
2115   if (UsePtrType) {
2116     VT = TLI.getPointerTy(DAG.getDataLayout());
2117     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2118   }
2119 
2120   B.RegVT = VT.getSimpleVT();
2121   B.Reg = FuncInfo.CreateReg(B.RegVT);
2122   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2123 
2124   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2125 
2126   addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2127   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2128   SwitchBB->normalizeSuccProbs();
2129 
2130   SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
2131                                 MVT::Other, CopyTo, RangeCmp,
2132                                 DAG.getBasicBlock(B.Default));
2133 
2134   // Avoid emitting unnecessary branches to the next block.
2135   if (MBB != NextBlock(SwitchBB))
2136     BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange,
2137                           DAG.getBasicBlock(MBB));
2138 
2139   DAG.setRoot(BrRange);
2140 }
2141 
2142 /// visitBitTestCase - This function produces one "bit test".
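     /// For example, with Mask == 0b0110 the emitted test computes
     ///   (1 << (X - First)) & 0b0110
     /// and branches to the target if the result is non-zero; a single-bit mask
     /// is instead lowered to a direct compare of the shift amount.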
2143 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2144                                            MachineBasicBlock* NextMBB,
2145                                            BranchProbability BranchProbToNext,
2146                                            unsigned Reg,
2147                                            BitTestCase &B,
2148                                            MachineBasicBlock *SwitchBB) {
2149   SDLoc dl = getCurSDLoc();
2150   MVT VT = BB.RegVT;
2151   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2152   SDValue Cmp;
2153   unsigned PopCount = countPopulation(B.Mask);
2154   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2155   if (PopCount == 1) {
2156     // Testing for a single bit; just compare the shift count with what it
2157     // would need to be to shift a 1 bit in that position.
2158     Cmp = DAG.getSetCC(
2159         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2160         ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2161         ISD::SETEQ);
2162   } else if (PopCount == BB.Range) {
2163     // There is only one zero bit in the range, test for it directly.
2164     Cmp = DAG.getSetCC(
2165         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2166         ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2167         ISD::SETNE);
2168   } else {
2169     // Make desired shift
2170     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2171                                     DAG.getConstant(1, dl, VT), ShiftOp);
2172 
2173     // Emit bit tests and jumps
2174     SDValue AndOp = DAG.getNode(ISD::AND, dl,
2175                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2176     Cmp = DAG.getSetCC(
2177         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2178         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2179   }
2180 
2181   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2182   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2183   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2184   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2185   // The sum of B.ExtraProb and BranchProbToNext is not guaranteed to be one,
2186   // as they are relative probabilities (and thus work more like weights),
2187   // so we normalize them to make them sum to one.
2188   SwitchBB->normalizeSuccProbs();
2189 
2190   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2191                               MVT::Other, getControlRoot(),
2192                               Cmp, DAG.getBasicBlock(B.TargetBB));
2193 
2194   // Avoid emitting unnecessary branches to the next block.
2195   if (NextMBB != NextBlock(SwitchBB))
2196     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2197                         DAG.getBasicBlock(NextMBB));
2198 
2199   DAG.setRoot(BrAnd);
2200 }
2201 
2202 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2203   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2204 
2205   // Retrieve the successors, looking through artificial IR-level blocks such
2206   // as catchswitch.
2207   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2208   const BasicBlock *EHPadBB = I.getSuccessor(1);
2209 
2210   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2211   // have to do anything here to lower funclet bundles.
2212   assert(!I.hasOperandBundlesOtherThan(
2213              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2214          "Cannot lower invokes with arbitrary operand bundles yet!");
2215 
2216   const Value *Callee(I.getCalledValue());
2217   const Function *Fn = dyn_cast<Function>(Callee);
2218   if (isa<InlineAsm>(Callee))
2219     visitInlineAsm(&I);
2220   else if (Fn && Fn->isIntrinsic()) {
2221     switch (Fn->getIntrinsicID()) {
2222     default:
2223       llvm_unreachable("Cannot invoke this intrinsic");
2224     case Intrinsic::donothing:
2225       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2226       break;
2227     case Intrinsic::experimental_patchpoint_void:
2228     case Intrinsic::experimental_patchpoint_i64:
2229       visitPatchpoint(&I, EHPadBB);
2230       break;
2231     case Intrinsic::experimental_gc_statepoint:
2232       LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
2233       break;
2234     }
2235   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2236     // Currently we do not lower any intrinsic calls with deopt operand bundles.
2237     // Eventually we will support lowering the @llvm.experimental.deoptimize
2238     // intrinsic, and right now there are no plans to support other intrinsics
2239     // with deopt state.
2240     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2241   } else {
2242     LowerCallTo(&I, getValue(Callee), false, EHPadBB);
2243   }
2244 
2245   // If the value of the invoke is used outside of its defining block, make it
2246   // available as a virtual register.
2247   // We already took care of the exported value for the statepoint instruction
2248   // during the call to LowerStatepoint.
2249   if (!isStatepoint(I)) {
2250     CopyToExportRegsIfNeeded(&I);
2251   }
2252 
2253   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2254   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2255   BranchProbability EHPadBBProb =
2256       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2257           : BranchProbability::getZero();
2258   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2259 
2260   // Update successor info.
2261   addSuccessorWithProb(InvokeMBB, Return);
2262   for (auto &UnwindDest : UnwindDests) {
2263     UnwindDest.first->setIsEHPad();
2264     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2265   }
2266   InvokeMBB->normalizeSuccProbs();
2267 
2268   // Drop into normal successor.
2269   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2270                           MVT::Other, getControlRoot(),
2271                           DAG.getBasicBlock(Return)));
2272 }
2273 
2274 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2275   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2276 }
2277 
2278 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2279   assert(FuncInfo.MBB->isEHPad() &&
2280          "Call to landingpad not in landing pad!");
2281 
2282   MachineBasicBlock *MBB = FuncInfo.MBB;
2283   addLandingPadInfo(LP, *MBB);
2284 
2285   // If there aren't registers to copy the values into (e.g., during SjLj
2286   // exceptions), then don't bother to create these DAG nodes.
2287   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2288   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2289   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2290       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2291     return;
2292 
2293   // If the landingpad's return type is a token type, we don't create DAG nodes
2294   // for its exception pointer and selector value. The extraction of exception
2295   // pointer or selector value from token type landingpads is not currently
2296   // supported.
2297   if (LP.getType()->isTokenTy())
2298     return;
2299 
2300   SmallVector<EVT, 2> ValueVTs;
2301   SDLoc dl = getCurSDLoc();
2302   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2303   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2304 
2305   // Get the two live-in registers as SDValues. The physregs have already been
2306   // copied into virtual registers.
2307   SDValue Ops[2];
2308   if (FuncInfo.ExceptionPointerVirtReg) {
2309     Ops[0] = DAG.getZExtOrTrunc(
2310         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2311                            FuncInfo.ExceptionPointerVirtReg,
2312                            TLI.getPointerTy(DAG.getDataLayout())),
2313         dl, ValueVTs[0]);
2314   } else {
2315     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
2316   }
2317   Ops[1] = DAG.getZExtOrTrunc(
2318       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2319                          FuncInfo.ExceptionSelectorVirtReg,
2320                          TLI.getPointerTy(DAG.getDataLayout())),
2321       dl, ValueVTs[1]);
2322 
2323   // Merge into one.
2324   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2325                             DAG.getVTList(ValueVTs), Ops);
2326   setValue(&LP, Res);
2327 }
2328 
2329 void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
2330 #ifndef NDEBUG
2331   for (const CaseCluster &CC : Clusters)
2332     assert(CC.Low == CC.High && "Input clusters must be single-case");
2333 #endif
2334 
2335   std::sort(Clusters.begin(), Clusters.end(),
2336             [](const CaseCluster &a, const CaseCluster &b) {
2337     return a.Low->getValue().slt(b.Low->getValue());
2338   });
2339 
2340   // Merge adjacent clusters with the same destination.
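       // For example, the single-case clusters {1->bb1, 2->bb1, 4->bb1} become
       // {[1,2]->bb1, [4,4]->bb1}; 2 and 4 stay separate because the case
       // values are not adjacent.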
2341   const unsigned N = Clusters.size();
2342   unsigned DstIndex = 0;
2343   for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
2344     CaseCluster &CC = Clusters[SrcIndex];
2345     const ConstantInt *CaseVal = CC.Low;
2346     MachineBasicBlock *Succ = CC.MBB;
2347 
2348     if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
2349         (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
2350       // If this case has the same successor and is a neighbour, merge it into
2351       // the previous cluster.
2352       Clusters[DstIndex - 1].High = CaseVal;
2353       Clusters[DstIndex - 1].Prob += CC.Prob;
2354     } else {
2355       std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
2356                    sizeof(Clusters[SrcIndex]));
2357     }
2358   }
2359   Clusters.resize(DstIndex);
2360 }
2361 
2362 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2363                                            MachineBasicBlock *Last) {
2364   // Update JTCases.
2365   for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2366     if (JTCases[i].first.HeaderBB == First)
2367       JTCases[i].first.HeaderBB = Last;
2368 
2369   // Update BitTestCases.
2370   for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2371     if (BitTestCases[i].Parent == First)
2372       BitTestCases[i].Parent = Last;
2373 }
2374 
2375 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2376   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2377 
2378   // Update machine-CFG edges with unique successors.
2379   SmallSet<BasicBlock*, 32> Done;
2380   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2381     BasicBlock *BB = I.getSuccessor(i);
2382     bool Inserted = Done.insert(BB).second;
2383     if (!Inserted)
2384       continue;
2385 
2386     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2387     addSuccessorWithProb(IndirectBrMBB, Succ);
2388   }
2389   IndirectBrMBB->normalizeSuccProbs();
2390 
2391   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2392                           MVT::Other, getControlRoot(),
2393                           getValue(I.getAddress())));
2394 }
2395 
2396 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2397   if (DAG.getTarget().Options.TrapUnreachable)
2398     DAG.setRoot(
2399         DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2400 }
2401 
2402 void SelectionDAGBuilder::visitFSub(const User &I) {
2403   // -0.0 - X --> fneg
2404   Type *Ty = I.getType();
2405   if (isa<Constant>(I.getOperand(0)) &&
2406       I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2407     SDValue Op2 = getValue(I.getOperand(1));
2408     setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2409                              Op2.getValueType(), Op2));
2410     return;
2411   }
2412 
2413   visitBinary(I, ISD::FSUB);
2414 }
2415 
2416 /// Checks if the given instruction performs a vector reduction, in which case
2417 /// we have the freedom to alter the elements in the result as long as the
2418 /// reduction of them stays unchanged.
2419 static bool isVectorReductionOp(const User *I) {
2420   const Instruction *Inst = dyn_cast<Instruction>(I);
2421   if (!Inst || !Inst->getType()->isVectorTy())
2422     return false;
2423 
2424   auto OpCode = Inst->getOpcode();
2425   switch (OpCode) {
2426   case Instruction::Add:
2427   case Instruction::Mul:
2428   case Instruction::And:
2429   case Instruction::Or:
2430   case Instruction::Xor:
2431     break;
2432   case Instruction::FAdd:
2433   case Instruction::FMul:
2434     if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2435       if (FPOp->getFastMathFlags().unsafeAlgebra())
2436         break;
2437     LLVM_FALLTHROUGH;
2438   default:
2439     return false;
2440   }
2441 
2442   unsigned ElemNum = Inst->getType()->getVectorNumElements();
2443   unsigned ElemNumToReduce = ElemNum;
2444 
2445   // Do DFS search on the def-use chain from the given instruction. We only
2446   // allow four kinds of operations during the search until we reach the
2447   // instruction that extracts the first element from the vector:
2448   //
2449   //   1. The reduction operation of the same opcode as the given instruction.
2450   //
2451   //   2. PHI node.
2452   //
2453   //   3. ShuffleVector instruction together with a reduction operation that
2454   //      does a partial reduction.
2455   //
2456   //   4. ExtractElement that extracts the first element from the vector, and we
2457   //      stop searching the def-use chain here.
2458   //
2459   // 3 & 4 above perform a reduction on all elements of the vector. We push defs
2460   // from 1-3 to the stack to continue the DFS. The given instruction is not
2461   // a reduction operation if we meet any instruction other than those
2462   // listed above.
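       //
       // For example, a 4-element add reduction typically looks like this
       // (illustrative IR):
       //   %rdx1 = shufflevector <4 x i32> %v, <4 x i32> undef,
       //                         <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
       //   %sum1 = add <4 x i32> %v, %rdx1
       //   %rdx2 = shufflevector <4 x i32> %sum1, <4 x i32> undef,
       //                         <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
       //   %sum2 = add <4 x i32> %sum1, %rdx2
       //   %res  = extractelement <4 x i32> %sum2, i32 0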
2463 
2464   SmallVector<const User *, 16> UsersToVisit{Inst};
2465   SmallPtrSet<const User *, 16> Visited;
2466   bool ReduxExtracted = false;
2467 
2468   while (!UsersToVisit.empty()) {
2469     auto User = UsersToVisit.back();
2470     UsersToVisit.pop_back();
2471     if (!Visited.insert(User).second)
2472       continue;
2473 
2474     for (const auto &U : User->users()) {
2475       auto Inst = dyn_cast<Instruction>(U);
2476       if (!Inst)
2477         return false;
2478 
2479       if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
2480         if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2481           if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().unsafeAlgebra())
2482             return false;
2483         UsersToVisit.push_back(U);
2484       } else if (const ShuffleVectorInst *ShufInst =
2485                      dyn_cast<ShuffleVectorInst>(U)) {
2486         // Detect the following pattern: a ShuffleVector instruction together
2487         // with a reduction that does a partial reduction on the first and
2488         // second ElemNumToReduce / 2 elements, and stores the result in the
2489         // first ElemNumToReduce / 2 elements of another vector.
2490 
2491         unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
2492         if (ResultElements < ElemNum)
2493           return false;
2494 
2495         if (ElemNumToReduce == 1)
2496           return false;
2497         if (!isa<UndefValue>(U->getOperand(1)))
2498           return false;
2499         for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
2500           if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
2501             return false;
2502         for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
2503           if (ShufInst->getMaskValue(i) != -1)
2504             return false;
2505 
2506         // There is only one user of this ShuffleVector instruction, which
2507         // must be a reduction operation.
2508         if (!U->hasOneUse())
2509           return false;
2510 
2511         auto U2 = dyn_cast<Instruction>(*U->user_begin());
2512         if (!U2 || U2->getOpcode() != OpCode)
2513           return false;
2514 
2515         // Check operands of the reduction operation.
2516         if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
2517             (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
2518           UsersToVisit.push_back(U2);
2519           ElemNumToReduce /= 2;
2520         } else
2521           return false;
2522       } else if (isa<ExtractElementInst>(U)) {
2523         // At this moment we should have reduced all elements in the vector.
2524         if (ElemNumToReduce != 1)
2525           return false;
2526 
2527         const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
2528         if (!Val || Val->getZExtValue() != 0)
2529           return false;
2530 
2531         ReduxExtracted = true;
2532       } else
2533         return false;
2534     }
2535   }
2536   return ReduxExtracted;
2537 }
2538 
2539 void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
2540   SDValue Op1 = getValue(I.getOperand(0));
2541   SDValue Op2 = getValue(I.getOperand(1));
2542 
2543   bool nuw = false;
2544   bool nsw = false;
2545   bool exact = false;
2546   bool vec_redux = false;
2547   FastMathFlags FMF;
2548 
2549   if (const OverflowingBinaryOperator *OFBinOp =
2550           dyn_cast<const OverflowingBinaryOperator>(&I)) {
2551     nuw = OFBinOp->hasNoUnsignedWrap();
2552     nsw = OFBinOp->hasNoSignedWrap();
2553   }
2554   if (const PossiblyExactOperator *ExactOp =
2555           dyn_cast<const PossiblyExactOperator>(&I))
2556     exact = ExactOp->isExact();
2557   if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&I))
2558     FMF = FPOp->getFastMathFlags();
2559 
2560   if (isVectorReductionOp(&I)) {
2561     vec_redux = true;
2562     DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");
2563   }
2564 
2565   SDNodeFlags Flags;
2566   Flags.setExact(exact);
2567   Flags.setNoSignedWrap(nsw);
2568   Flags.setNoUnsignedWrap(nuw);
2569   Flags.setVectorReduction(vec_redux);
2570   Flags.setAllowReciprocal(FMF.allowReciprocal());
2571   Flags.setAllowContract(FMF.allowContract());
2572   Flags.setNoInfs(FMF.noInfs());
2573   Flags.setNoNaNs(FMF.noNaNs());
2574   Flags.setNoSignedZeros(FMF.noSignedZeros());
2575   Flags.setUnsafeAlgebra(FMF.unsafeAlgebra());
2576 
2577   SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
2578                                      Op1, Op2, Flags);
2579   setValue(&I, BinNodeValue);
2580 }
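
// For illustration (hypothetical example, not part of the original source):
// an IR operator such as
//   %r = add nuw nsw i32 %a, %b
// arrives here with OpCode == ISD::ADD; the nuw/nsw bits are transferred to
// SDNodeFlags, so the emitted ADD node still carries the no-wrap information
// for later DAG combines.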
2581 
2582 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
2583   SDValue Op1 = getValue(I.getOperand(0));
2584   SDValue Op2 = getValue(I.getOperand(1));
2585 
2586   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
2587       Op2.getValueType(), DAG.getDataLayout());
2588 
2589   // Coerce the shift amount to the right type if we can.
2590   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
2591     unsigned ShiftSize = ShiftTy.getSizeInBits();
2592     unsigned Op2Size = Op2.getValueSizeInBits();
2593     SDLoc DL = getCurSDLoc();
2594 
2595     // If the operand is smaller than the shift count type, promote it.
2596     if (ShiftSize > Op2Size)
2597       Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
2598 
2599     // If the operand is larger than the shift count type but the shift
2600     // count type has enough bits to represent any shift value, truncate
2601     // it now. This is a common case and it exposes the truncate to
2602     // optimization early.
2603     else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
2604       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2605     // Otherwise we'll need to temporarily settle for some other convenient
2606     // type.  Type legalization will make adjustments once the shiftee is split.
2607     else
2608       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2609   }
2610 
2611   bool nuw = false;
2612   bool nsw = false;
2613   bool exact = false;
2614 
2615   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2616 
2617     if (const OverflowingBinaryOperator *OFBinOp =
2618             dyn_cast<const OverflowingBinaryOperator>(&I)) {
2619       nuw = OFBinOp->hasNoUnsignedWrap();
2620       nsw = OFBinOp->hasNoSignedWrap();
2621     }
2622     if (const PossiblyExactOperator *ExactOp =
2623             dyn_cast<const PossiblyExactOperator>(&I))
2624       exact = ExactOp->isExact();
2625   }
2626   SDNodeFlags Flags;
2627   Flags.setExact(exact);
2628   Flags.setNoSignedWrap(nsw);
2629   Flags.setNoUnsignedWrap(nuw);
2630   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2631                             Flags);
2632   setValue(&I, Res);
2633 }
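
// For illustration (hypothetical, assuming a target whose shift-amount type
// is i8, e.g. X86):
//   %r = shl i64 %a, %n
// has an i64 amount wider than i8, but i8 can represent any shift of an i64
// (8 >= Log2_32_Ceil(64) == 6), so %n is truncated to i8 early, exposing the
// truncate to optimization.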
2634 
2635 void SelectionDAGBuilder::visitSDiv(const User &I) {
2636   SDValue Op1 = getValue(I.getOperand(0));
2637   SDValue Op2 = getValue(I.getOperand(1));
2638 
2639   SDNodeFlags Flags;
2640   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
2641                  cast<PossiblyExactOperator>(&I)->isExact());
2642   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
2643                            Op2, Flags));
2644 }
2645 
2646 void SelectionDAGBuilder::visitICmp(const User &I) {
2647   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2648   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2649     predicate = IC->getPredicate();
2650   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2651     predicate = ICmpInst::Predicate(IC->getPredicate());
2652   SDValue Op1 = getValue(I.getOperand(0));
2653   SDValue Op2 = getValue(I.getOperand(1));
2654   ISD::CondCode Opcode = getICmpCondCode(predicate);
2655 
2656   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2657                                                         I.getType());
2658   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2659 }
2660 
2661 void SelectionDAGBuilder::visitFCmp(const User &I) {
2662   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2663   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2664     predicate = FC->getPredicate();
2665   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2666     predicate = FCmpInst::Predicate(FC->getPredicate());
2667   SDValue Op1 = getValue(I.getOperand(0));
2668   SDValue Op2 = getValue(I.getOperand(1));
2669   ISD::CondCode Condition = getFCmpCondCode(predicate);
2670 
2671   // FIXME: Fcmp instructions have fast-math-flags in IR, so we should use them.
2672   // FIXME: We should propagate the fast-math-flags to the DAG node itself for
2673   // further optimization, but currently FMF is only applicable to binary nodes.
2674   if (TM.Options.NoNaNsFPMath)
2675     Condition = getFCmpCodeWithoutNaN(Condition);
2676   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2677                                                         I.getType());
2678   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2679 }
2680 
2681 // Check that every user of the select condition is itself a select; only
2682 // then does replacing the selects make the comparison dead.
2683 static bool hasOnlySelectUsers(const Value *Cond) {
2684   return all_of(Cond->users(), [](const Value *V) {
2685     return isa<SelectInst>(V);
2686   });
2687 }
2688 
2689 void SelectionDAGBuilder::visitSelect(const User &I) {
2690   SmallVector<EVT, 4> ValueVTs;
2691   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
2692                   ValueVTs);
2693   unsigned NumValues = ValueVTs.size();
2694   if (NumValues == 0) return;
2695 
2696   SmallVector<SDValue, 4> Values(NumValues);
2697   SDValue Cond     = getValue(I.getOperand(0));
2698   SDValue LHSVal   = getValue(I.getOperand(1));
2699   SDValue RHSVal   = getValue(I.getOperand(2));
2700   auto BaseOps = {Cond};
2701   ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2702     ISD::VSELECT : ISD::SELECT;
2703 
2704   // Min/max matching is only viable if all output VTs are the same.
2705   if (std::equal(ValueVTs.begin() + 1, ValueVTs.end(), ValueVTs.begin())) {
2706     EVT VT = ValueVTs[0];
2707     LLVMContext &Ctx = *DAG.getContext();
2708     auto &TLI = DAG.getTargetLoweringInfo();
2709 
2710     // We care about the legality of the operation after it has been type
2711     // legalized.
2712     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal &&
2713            VT != TLI.getTypeToTransformTo(Ctx, VT))
2714       VT = TLI.getTypeToTransformTo(Ctx, VT);
2715 
2716     // If the vselect is legal, assume we want to leave this as a vector setcc +
2717     // vselect. Otherwise, if this is going to be scalarized, we want to see if
2718     // min/max is legal on the scalar type.
2719     bool UseScalarMinMax = VT.isVector() &&
2720       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
2721 
2722     Value *LHS, *RHS;
2723     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
2724     ISD::NodeType Opc = ISD::DELETED_NODE;
2725     switch (SPR.Flavor) {
2726     case SPF_UMAX:    Opc = ISD::UMAX; break;
2727     case SPF_UMIN:    Opc = ISD::UMIN; break;
2728     case SPF_SMAX:    Opc = ISD::SMAX; break;
2729     case SPF_SMIN:    Opc = ISD::SMIN; break;
2730     case SPF_FMINNUM:
2731       switch (SPR.NaNBehavior) {
2732       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2733       case SPNB_RETURNS_NAN:   Opc = ISD::FMINNAN; break;
2734       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
2735       case SPNB_RETURNS_ANY: {
2736         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
2737           Opc = ISD::FMINNUM;
2738         else if (TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT))
2739           Opc = ISD::FMINNAN;
2740         else if (UseScalarMinMax)
2741           Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
2742             ISD::FMINNUM : ISD::FMINNAN;
2743         break;
2744       }
2745       }
2746       break;
2747     case SPF_FMAXNUM:
2748       switch (SPR.NaNBehavior) {
2749       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2750       case SPNB_RETURNS_NAN:   Opc = ISD::FMAXNAN; break;
2751       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
2752       case SPNB_RETURNS_ANY:
2753 
2754         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
2755           Opc = ISD::FMAXNUM;
2756         else if (TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT))
2757           Opc = ISD::FMAXNAN;
2758         else if (UseScalarMinMax)
2759           Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
2760             ISD::FMAXNUM : ISD::FMAXNAN;
2761         break;
2762       }
2763       break;
2764     default: break;
2765     }
2766 
2767     if (Opc != ISD::DELETED_NODE &&
2768         (TLI.isOperationLegalOrCustom(Opc, VT) ||
2769          (UseScalarMinMax &&
2770           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
2771         // If the underlying comparison instruction is used by any other
2772         // instruction, the consumed instructions won't be destroyed, so it is
2773         // not profitable to convert to a min/max.
2774         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
2775       OpCode = Opc;
2776       LHSVal = getValue(LHS);
2777       RHSVal = getValue(RHS);
2778       BaseOps = {};
2779     }
2780   }
2781 
2782   for (unsigned i = 0; i != NumValues; ++i) {
2783     SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
2784     Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
2785     Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
2786     Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
2787                             LHSVal.getNode()->getValueType(LHSVal.getResNo()+i),
2788                             Ops);
2789   }
2790 
2791   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2792                            DAG.getVTList(ValueVTs), Values));
2793 }
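
// For illustration (hypothetical IR, not from the original source): the
// min/max matching above turns
//   %c = icmp slt i32 %a, %b
//   %s = select i1 %c, i32 %a, i32 %b
// into a single ISD::SMIN node, provided SMIN is legal or custom for the
// (possibly type-legalized) VT and %c is used only by selects.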
2794 
2795 void SelectionDAGBuilder::visitTrunc(const User &I) {
2796   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2797   SDValue N = getValue(I.getOperand(0));
2798   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2799                                                         I.getType());
2800   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
2801 }
2802 
2803 void SelectionDAGBuilder::visitZExt(const User &I) {
2804   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2805   // ZExt also can't be a cast to bool for the same reason; nothing much to do.
2806   SDValue N = getValue(I.getOperand(0));
2807   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2808                                                         I.getType());
2809   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
2810 }
2811 
2812 void SelectionDAGBuilder::visitSExt(const User &I) {
2813   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2814   // SExt also can't be a cast to bool for the same reason; nothing much to do.
2815   SDValue N = getValue(I.getOperand(0));
2816   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2817                                                         I.getType());
2818   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
2819 }
2820 
2821 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
2822   // FPTrunc is never a no-op cast, no need to check
2823   SDValue N = getValue(I.getOperand(0));
2824   SDLoc dl = getCurSDLoc();
2825   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2826   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
2827   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
2828                            DAG.getTargetConstant(
2829                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
2830 }
2831 
2832 void SelectionDAGBuilder::visitFPExt(const User &I) {
2833   // FPExt is never a no-op cast, no need to check
2834   SDValue N = getValue(I.getOperand(0));
2835   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2836                                                         I.getType());
2837   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
2838 }
2839 
2840 void SelectionDAGBuilder::visitFPToUI(const User &I) {
2841   // FPToUI is never a no-op cast, no need to check
2842   SDValue N = getValue(I.getOperand(0));
2843   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2844                                                         I.getType());
2845   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
2846 }
2847 
2848 void SelectionDAGBuilder::visitFPToSI(const User &I) {
2849   // FPToSI is never a no-op cast, no need to check
2850   SDValue N = getValue(I.getOperand(0));
2851   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2852                                                         I.getType());
2853   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
2854 }
2855 
2856 void SelectionDAGBuilder::visitUIToFP(const User &I) {
2857   // UIToFP is never a no-op cast, no need to check
2858   SDValue N = getValue(I.getOperand(0));
2859   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2860                                                         I.getType());
2861   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
2862 }
2863 
2864 void SelectionDAGBuilder::visitSIToFP(const User &I) {
2865   // SIToFP is never a no-op cast, no need to check
2866   SDValue N = getValue(I.getOperand(0));
2867   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2868                                                         I.getType());
2869   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
2870 }
2871 
2872 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
2873   // What to do depends on the size of the integer and the size of the pointer.
2874   // We can either truncate, zero extend, or no-op, accordingly.
2875   SDValue N = getValue(I.getOperand(0));
2876   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2877                                                         I.getType());
2878   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
2879 }
2880 
2881 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
2882   // What to do depends on the size of the integer and the size of the pointer.
2883   // We can either truncate, zero extend, or no-op, accordingly.
2884   SDValue N = getValue(I.getOperand(0));
2885   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2886                                                         I.getType());
2887   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
2888 }
2889 
2890 void SelectionDAGBuilder::visitBitCast(const User &I) {
2891   SDValue N = getValue(I.getOperand(0));
2892   SDLoc dl = getCurSDLoc();
2893   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2894                                                         I.getType());
2895 
2896   // BitCast assures us that source and destination are the same size so this is
2897   // either a BITCAST or a no-op.
2898   if (DestVT != N.getValueType())
2899     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
2900                              DestVT, N)); // convert types.
2901   // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
2902   // might fold any kind of constant expression to an integer constant and that
2903   // is not what we are looking for. Only recognize a bitcast of a genuine
2904   // constant integer as an opaque constant.
2905   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
2906     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
2907                                  /*isOpaque*/true));
2908   else
2909     setValue(&I, N);            // noop cast.
2910 }
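
// For illustration (an assumption about the producer, not stated in this
// file): passes such as ConstantHoisting emit same-type integer bitcasts,
// e.g.
//   %const = bitcast i32 12345678 to i32
// Here DestVT equals the operand type and the operand is a genuine
// ConstantInt, so an opaque constant is emitted that later combines will not
// fold back into every user.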
2911 
2912 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
2913   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2914   const Value *SV = I.getOperand(0);
2915   SDValue N = getValue(SV);
2916   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
2917 
2918   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
2919   unsigned DestAS = I.getType()->getPointerAddressSpace();
2920 
2921   if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
2922     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
2923 
2924   setValue(&I, N);
2925 }
2926 
2927 void SelectionDAGBuilder::visitInsertElement(const User &I) {
2928   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2929   SDValue InVec = getValue(I.getOperand(0));
2930   SDValue InVal = getValue(I.getOperand(1));
2931   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
2932                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
2933   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
2934                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
2935                            InVec, InVal, InIdx));
2936 }
2937 
2938 void SelectionDAGBuilder::visitExtractElement(const User &I) {
2939   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2940   SDValue InVec = getValue(I.getOperand(0));
2941   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
2942                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
2943   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
2944                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
2945                            InVec, InIdx));
2946 }
2947 
2948 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
2949   SDValue Src1 = getValue(I.getOperand(0));
2950   SDValue Src2 = getValue(I.getOperand(1));
2951   SDLoc DL = getCurSDLoc();
2952 
2953   SmallVector<int, 8> Mask;
2954   ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
2955   unsigned MaskNumElts = Mask.size();
2956 
2957   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2958   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
2959   EVT SrcVT = Src1.getValueType();
2960   unsigned SrcNumElts = SrcVT.getVectorNumElements();
2961 
2962   if (SrcNumElts == MaskNumElts) {
2963     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
2964     return;
2965   }
2966 
2967   // Normalize the shuffle vector since mask and vector length don't match.
2968   if (SrcNumElts < MaskNumElts) {
2969     // The mask is longer than the source vectors. We can concatenate vectors
2970     // to make the mask and vector lengths match.
2971 
2972     if (MaskNumElts % SrcNumElts == 0) {
2973       // Mask length is a multiple of the source vector length.
2974       // Check if the shuffle is some kind of concatenation of the input
2975       // vectors.
2976       unsigned NumConcat = MaskNumElts / SrcNumElts;
2977       bool IsConcat = true;
2978       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
2979       for (unsigned i = 0; i != MaskNumElts; ++i) {
2980         int Idx = Mask[i];
2981         if (Idx < 0)
2982           continue;
2983         // Ensure the indices in each SrcVT sized piece are sequential and that
2984         // the same source is used for the whole piece.
2985         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
2986             (ConcatSrcs[i / SrcNumElts] >= 0 &&
2987              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
2988           IsConcat = false;
2989           break;
2990         }
2991         // Remember which source this index came from.
2992         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
2993       }
2994 
2995       // The shuffle is concatenating multiple vectors together. Just emit
2996       // a CONCAT_VECTORS operation.
2997       if (IsConcat) {
2998         SmallVector<SDValue, 8> ConcatOps;
2999         for (auto Src : ConcatSrcs) {
3000           if (Src < 0)
3001             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3002           else if (Src == 0)
3003             ConcatOps.push_back(Src1);
3004           else
3005             ConcatOps.push_back(Src2);
3006         }
3007         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3008         return;
3009       }
3010     }
3011 
3012     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3013     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3014     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3015                                     PaddedMaskNumElts);
3016 
3017     // Pad both vectors with undefs to make them the same length as the mask.
3018     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3019 
3020     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3021     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3022     MOps1[0] = Src1;
3023     MOps2[0] = Src2;
3024 
3025     Src1 = Src1.isUndef()
3026                ? DAG.getUNDEF(PaddedVT)
3027                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3028     Src2 = Src2.isUndef()
3029                ? DAG.getUNDEF(PaddedVT)
3030                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3031 
3032     // Readjust mask for new input vector length.
3033     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3034     for (unsigned i = 0; i != MaskNumElts; ++i) {
3035       int Idx = Mask[i];
3036       if (Idx >= (int)SrcNumElts)
3037         Idx -= SrcNumElts - PaddedMaskNumElts;
3038       MappedOps[i] = Idx;
3039     }
3040 
3041     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3042 
3043     // If the concatenated vector was padded, extract a subvector with the
3044     // correct number of elements.
3045     if (MaskNumElts != PaddedMaskNumElts)
3046       Result = DAG.getNode(
3047           ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3048           DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
3049 
3050     setValue(&I, Result);
3051     return;
3052   }
3053 
3054   if (SrcNumElts > MaskNumElts) {
3055     // Analyze the access pattern of the vector to see if we can extract
3056     // two subvectors and do the shuffle.
3057     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3058     bool CanExtract = true;
3059     for (int Idx : Mask) {
3060       unsigned Input = 0;
3061       if (Idx < 0)
3062         continue;
3063 
3064       if (Idx >= (int)SrcNumElts) {
3065         Input = 1;
3066         Idx -= SrcNumElts;
3067       }
3068 
3069       // If all the indices come from the same MaskNumElts sized portion of
3070       // the sources we can use extract. Also make sure the extract wouldn't
3071       // extract past the end of the source.
3072       int NewStartIdx = alignDown(Idx, MaskNumElts);
3073       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3074           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3075         CanExtract = false;
3076       // Make sure we always update StartIdx as we use it to track if all
3077       // elements are undef.
3078       StartIdx[Input] = NewStartIdx;
3079     }
3080 
3081     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3082       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3083       return;
3084     }
3085     if (CanExtract) {
3086       // Extract appropriate subvector and generate a vector shuffle
3087       for (unsigned Input = 0; Input < 2; ++Input) {
3088         SDValue &Src = Input == 0 ? Src1 : Src2;
3089         if (StartIdx[Input] < 0)
3090           Src = DAG.getUNDEF(VT);
3091         else {
3092           Src = DAG.getNode(
3093               ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3094               DAG.getConstant(StartIdx[Input], DL,
3095                               TLI.getVectorIdxTy(DAG.getDataLayout())));
3096         }
3097       }
3098 
3099       // Calculate new mask.
3100       SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3101       for (int &Idx : MappedOps) {
3102         if (Idx >= (int)SrcNumElts)
3103           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3104         else if (Idx >= 0)
3105           Idx -= StartIdx[0];
3106       }
3107 
3108       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3109       return;
3110     }
3111   }
3112 
3113   // We can't use either concat vectors or extract subvectors, so fall back
3114   // to replacing the shuffle with per-element extracts feeding a build
3115   // vector.
3116   EVT EltVT = VT.getVectorElementType();
3117   EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
3118   SmallVector<SDValue,8> Ops;
3119   for (int Idx : Mask) {
3120     SDValue Res;
3121 
3122     if (Idx < 0) {
3123       Res = DAG.getUNDEF(EltVT);
3124     } else {
3125       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3126       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3127 
3128       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
3129                         EltVT, Src, DAG.getConstant(Idx, DL, IdxVT));
3130     }
3131 
3132     Ops.push_back(Res);
3133   }
3134 
3135   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3136 }
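
// For illustration (hypothetical masks): with <2 x i32> sources,
//   %v = shufflevector <2 x i32> %a, <2 x i32> %b,
//                      <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// is recognized above as CONCAT_VECTORS(%a, %b), whereas a mask that mixes
// elements non-sequentially falls through to the pad-with-undef or
// extract-and-build paths.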
3137 
3138 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
3139   const Value *Op0 = I.getOperand(0);
3140   const Value *Op1 = I.getOperand(1);
3141   Type *AggTy = I.getType();
3142   Type *ValTy = Op1->getType();
3143   bool IntoUndef = isa<UndefValue>(Op0);
3144   bool FromUndef = isa<UndefValue>(Op1);
3145 
3146   unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
3147 
3148   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3149   SmallVector<EVT, 4> AggValueVTs;
3150   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3151   SmallVector<EVT, 4> ValValueVTs;
3152   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3153 
3154   unsigned NumAggValues = AggValueVTs.size();
3155   unsigned NumValValues = ValValueVTs.size();
3156   SmallVector<SDValue, 4> Values(NumAggValues);
3157 
3158   // Ignore an insertvalue that produces an empty object
3159   if (!NumAggValues) {
3160     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3161     return;
3162   }
3163 
3164   SDValue Agg = getValue(Op0);
3165   unsigned i = 0;
3166   // Copy the beginning value(s) from the original aggregate.
3167   for (; i != LinearIndex; ++i)
3168     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3169                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3170   // Copy values from the inserted value(s).
3171   if (NumValValues) {
3172     SDValue Val = getValue(Op1);
3173     for (; i != LinearIndex + NumValValues; ++i)
3174       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3175                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3176   }
3177   // Copy remaining value(s) from the original aggregate.
3178   for (; i != NumAggValues; ++i)
3179     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3180                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3181 
3182   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3183                            DAG.getVTList(AggValueVTs), Values));
3184 }
3185 
3186 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
3187   const Value *Op0 = I.getOperand(0);
3188   Type *AggTy = Op0->getType();
3189   Type *ValTy = I.getType();
3190   bool OutOfUndef = isa<UndefValue>(Op0);
3191 
3192   unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
3193 
3194   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3195   SmallVector<EVT, 4> ValValueVTs;
3196   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3197 
3198   unsigned NumValValues = ValValueVTs.size();
3199 
3200   // Ignore an extractvalue that produces an empty object
3201   if (!NumValValues) {
3202     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3203     return;
3204   }
3205 
3206   SmallVector<SDValue, 4> Values(NumValValues);
3207 
3208   SDValue Agg = getValue(Op0);
3209   // Copy out the selected value(s).
3210   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3211     Values[i - LinearIndex] =
3212       OutOfUndef ?
3213         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3214         SDValue(Agg.getNode(), Agg.getResNo() + i);
3215 
3216   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3217                            DAG.getVTList(ValValueVTs), Values));
3218 }
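
// For illustration (hypothetical): for an aggregate of type { i32, i64 },
//   %v = extractvalue { i32, i64 } %agg, 1
// ComputeLinearIndex maps the index to 1, and the copied value is simply
// result number Agg.getResNo() + 1 of the node that produced the aggregate.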
3219 
3220 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3221   Value *Op0 = I.getOperand(0);
3222   // Note that the pointer operand may be a vector of pointers. Take the scalar
3223   // element which holds a pointer.
3224   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3225   SDValue N = getValue(Op0);
3226   SDLoc dl = getCurSDLoc();
3227 
3228   // Normalize a vector GEP - all scalar operands should be converted to
3229   // splat vectors.
3230   unsigned VectorWidth = I.getType()->isVectorTy() ?
3231     cast<VectorType>(I.getType())->getVectorNumElements() : 0;
3232 
3233   if (VectorWidth && !N.getValueType().isVector()) {
3234     LLVMContext &Context = *DAG.getContext();
3235     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth);
3236     N = DAG.getSplatBuildVector(VT, dl, N);
3237   }
3238 
3239   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3240        GTI != E; ++GTI) {
3241     const Value *Idx = GTI.getOperand();
3242     if (StructType *StTy = GTI.getStructTypeOrNull()) {
3243       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3244       if (Field) {
3245         // N = N + Offset
3246         uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3247 
3248         // In an inbounds GEP with an offset that is nonnegative even when
3249         // interpreted as signed, assume there is no unsigned overflow.
3250         SDNodeFlags Flags;
3251         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3252           Flags.setNoUnsignedWrap(true);
3253 
3254         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3255                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3256       }
3257     } else {
3258       MVT PtrTy =
3259           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout(), AS);
3260       unsigned PtrSize = PtrTy.getSizeInBits();
3261       APInt ElementSize(PtrSize, DL->getTypeAllocSize(GTI.getIndexedType()));
3262 
3263       // If this is a scalar constant or a splat vector of constants,
3264       // handle it quickly.
3265       const auto *CI = dyn_cast<ConstantInt>(Idx);
3266       if (!CI && isa<ConstantDataVector>(Idx) &&
3267           cast<ConstantDataVector>(Idx)->getSplatValue())
3268         CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());
3269 
3270       if (CI) {
3271         if (CI->isZero())
3272           continue;
3273         APInt Offs = ElementSize * CI->getValue().sextOrTrunc(PtrSize);
3274         LLVMContext &Context = *DAG.getContext();
3275         SDValue OffsVal = VectorWidth ?
3276           DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, PtrTy, VectorWidth)) :
3277           DAG.getConstant(Offs, dl, PtrTy);
3278 
3279         // In an inbounds GEP with an offset that is nonnegative even when
3280         // interpreted as signed, assume there is no unsigned overflow.
3281         SDNodeFlags Flags;
3282         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3283           Flags.setNoUnsignedWrap(true);
3284 
3285         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3286         continue;
3287       }
3288 
3289       // N = N + Idx * ElementSize;
3290       SDValue IdxN = getValue(Idx);
3291 
3292       if (!IdxN.getValueType().isVector() && VectorWidth) {
3293         MVT VT = MVT::getVectorVT(IdxN.getValueType().getSimpleVT(), VectorWidth);
3294         IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3295       }
3296 
3297       // If the index is smaller or larger than intptr_t, truncate or extend
3298       // it.
3299       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3300 
3301       // If this is a multiply by a power of two, turn it into a shl
3302       // immediately.  This is a very common case.
3303       if (ElementSize != 1) {
3304         if (ElementSize.isPowerOf2()) {
3305           unsigned Amt = ElementSize.logBase2();
3306           IdxN = DAG.getNode(ISD::SHL, dl,
3307                              N.getValueType(), IdxN,
3308                              DAG.getConstant(Amt, dl, IdxN.getValueType()));
3309         } else {
3310           SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType());
3311           IdxN = DAG.getNode(ISD::MUL, dl,
3312                              N.getValueType(), IdxN, Scale);
3313         }
3314       }
3315 
3316       N = DAG.getNode(ISD::ADD, dl,
3317                       N.getValueType(), N, IdxN);
3318     }
3319   }
3320 
3321   setValue(&I, N);
3322 }
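
// For illustration (hypothetical, assuming 4-byte i32):
//   %q = getelementptr inbounds i32, i32* %p, i64 %i
// has ElementSize == 4, a power of two, so the index is scaled with a shift,
// N = %p + (%i << 2); a constant index such as i64 3 would instead fold to
// an ADD of the byte offset 12, tagged no-unsigned-wrap because the GEP is
// inbounds with a nonnegative offset.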
3323 
3324 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3325   // If this is a fixed sized alloca in the entry block of the function,
3326   // allocate it statically on the stack.
3327   if (FuncInfo.StaticAllocaMap.count(&I))
3328     return;   // getValue will auto-populate this.
3329 
3330   SDLoc dl = getCurSDLoc();
3331   Type *Ty = I.getAllocatedType();
3332   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3333   auto &DL = DAG.getDataLayout();
3334   uint64_t TySize = DL.getTypeAllocSize(Ty);
3335   unsigned Align =
3336       std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
3337 
3338   SDValue AllocSize = getValue(I.getArraySize());
3339 
3340   EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout());
3341   if (AllocSize.getValueType() != IntPtr)
3342     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
3343 
3344   AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
3345                           AllocSize,
3346                           DAG.getConstant(TySize, dl, IntPtr));
3347 
3348   // Handle alignment.  If the requested alignment is less than or equal to
3349   // the stack alignment, ignore it.  If it is greater than the stack
3350   // alignment, we note the requested alignment in the DYNAMIC_STACKALLOC node.
3351   unsigned StackAlign =
3352       DAG.getSubtarget().getFrameLowering()->getStackAlignment();
3353   if (Align <= StackAlign)
3354     Align = 0;
3355 
3356   // Round the size of the allocation up to the stack alignment size
3357   // by adding SA-1 to the size. This doesn't overflow because we're computing
3358   // an address inside an alloca.
3359   SDNodeFlags Flags;
3360   Flags.setNoUnsignedWrap(true);
3361   AllocSize = DAG.getNode(ISD::ADD, dl,
3362                           AllocSize.getValueType(), AllocSize,
3363                           DAG.getIntPtrConstant(StackAlign - 1, dl), Flags);
3364 
3365   // Mask out the low bits for alignment purposes.
3366   AllocSize = DAG.getNode(ISD::AND, dl,
3367                           AllocSize.getValueType(), AllocSize,
3368                           DAG.getIntPtrConstant(~(uint64_t)(StackAlign - 1),
3369                                                 dl));
3370 
3371   SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align, dl) };
3372   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3373   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
3374   setValue(&I, DSA);
3375   DAG.setRoot(DSA.getValue(1));
3376 
3377   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3378 }
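
// For illustration (hypothetical numbers, assuming a 16-byte stack
// alignment):
//   %buf = alloca i8, i32 %n
// yields AllocSize = %n * 1 bytes, rounded up as (AllocSize + 15) & ~15,
// so %n == 20 requests 32 bytes from the DYNAMIC_STACKALLOC node.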
3379 
3380 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3381   if (I.isAtomic())
3382     return visitAtomicLoad(I);
3383 
3384   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3385   const Value *SV = I.getOperand(0);
3386   if (TLI.supportSwiftError()) {
3387     // Swifterror values can come from either a function parameter with
3388     // swifterror attribute or an alloca with swifterror attribute.
3389     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
3390       if (Arg->hasSwiftErrorAttr())
3391         return visitLoadFromSwiftError(I);
3392     }
3393 
3394     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3395       if (Alloca->isSwiftError())
3396         return visitLoadFromSwiftError(I);
3397     }
3398   }
3399 
3400   SDValue Ptr = getValue(SV);
3401 
3402   Type *Ty = I.getType();
3403 
3404   bool isVolatile = I.isVolatile();
3405   bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
3406   bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr;
3407   bool isDereferenceable = isDereferenceablePointer(SV, DAG.getDataLayout());
3408   unsigned Alignment = I.getAlignment();
3409 
3410   AAMDNodes AAInfo;
3411   I.getAAMetadata(AAInfo);
3412   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3413 
3414   SmallVector<EVT, 4> ValueVTs;
3415   SmallVector<uint64_t, 4> Offsets;
3416   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
3417   unsigned NumValues = ValueVTs.size();
3418   if (NumValues == 0)
3419     return;
3420 
3421   SDValue Root;
3422   bool ConstantMemory = false;
3423   if (isVolatile || NumValues > MaxParallelChains)
3424     // Serialize volatile loads with other side effects.
3425     Root = getRoot();
3426   else if (AA->pointsToConstantMemory(MemoryLocation(
3427                SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) {
3428     // Do not serialize (non-volatile) loads of constant memory with anything.
3429     Root = DAG.getEntryNode();
3430     ConstantMemory = true;
3431   } else {
3432     // Do not serialize non-volatile loads against each other.
3433     Root = DAG.getRoot();
3434   }
3435 
3436   SDLoc dl = getCurSDLoc();
3437 
3438   if (isVolatile)
3439     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
3440 
3441   // An aggregate load cannot wrap around the address space, so offsets to its
3442   // parts don't wrap either.
3443   SDNodeFlags Flags;
3444   Flags.setNoUnsignedWrap(true);
3445 
3446   SmallVector<SDValue, 4> Values(NumValues);
3447   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3448   EVT PtrVT = Ptr.getValueType();
3449   unsigned ChainI = 0;
3450   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3451     // Serializing loads here may result in excessive register pressure, and
3452     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
3453     // could recover a bit by hoisting nodes upward in the chain by recognizing
3454     // they are side-effect free or do not alias. The optimizer should really
3455     // avoid this case by converting large object/array copies to llvm.memcpy
3456     // (MaxParallelChains should always remain as failsafe).
3457     if (ChainI == MaxParallelChains) {
3458       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
3459       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3460                                   makeArrayRef(Chains.data(), ChainI));
3461       Root = Chain;
3462       ChainI = 0;
3463     }
3464     SDValue A = DAG.getNode(ISD::ADD, dl,
3465                             PtrVT, Ptr,
3466                             DAG.getConstant(Offsets[i], dl, PtrVT),
3467                             Flags);
3468     auto MMOFlags = MachineMemOperand::MONone;
3469     if (isVolatile)
3470       MMOFlags |= MachineMemOperand::MOVolatile;
3471     if (isNonTemporal)
3472       MMOFlags |= MachineMemOperand::MONonTemporal;
3473     if (isInvariant)
3474       MMOFlags |= MachineMemOperand::MOInvariant;
3475     if (isDereferenceable)
3476       MMOFlags |= MachineMemOperand::MODereferenceable;
3477 
3478     SDValue L = DAG.getLoad(ValueVTs[i], dl, Root, A,
3479                             MachinePointerInfo(SV, Offsets[i]), Alignment,
3480                             MMOFlags, AAInfo, Ranges);
3481 
3482     Values[i] = L;
3483     Chains[ChainI] = L.getValue(1);
3484   }
3485 
3486   if (!ConstantMemory) {
3487     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3488                                 makeArrayRef(Chains.data(), ChainI));
3489     if (isVolatile)
3490       DAG.setRoot(Chain);
3491     else
3492       PendingLoads.push_back(Chain);
3493   }
3494 
3495   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
3496                            DAG.getVTList(ValueVTs), Values));
3497 }
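
// For illustration (hypothetical): a load of a { i32, i32, i32 } aggregate
// expands above into three parallel loads at offsets 0, 4 and 8 whose chains
// are joined by one TokenFactor; only if the number of values exceeded
// MaxParallelChains would the chain be re-rooted mid-loop.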
3498 
3499 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
3500   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3501   assert(TLI.supportSwiftError() &&
3502          "call visitStoreToSwiftError when backend supports swifterror");
3503 
3504   SmallVector<EVT, 4> ValueVTs;
3505   SmallVector<uint64_t, 4> Offsets;
3506   const Value *SrcV = I.getOperand(0);
3507   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3508                   SrcV->getType(), ValueVTs, &Offsets);
3509   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3510          "expect a single EVT for swifterror");
3511 
3512   SDValue Src = getValue(SrcV);
3513   // Create a virtual register, then update the virtual register.
3514   auto &DL = DAG.getDataLayout();
3515   const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
3516   unsigned VReg = FuncInfo.MF->getRegInfo().createVirtualRegister(RC);
3517   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
3518   // Chain can be getRoot or getControlRoot.
3519   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
3520                                       SDValue(Src.getNode(), Src.getResNo()));
3521   DAG.setRoot(CopyNode);
3522   FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.getOperand(1), VReg);
3523 }
3524 
3525 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
3526   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
3527          "call visitLoadFromSwiftError when backend supports swifterror");
3528 
3529   assert(!I.isVolatile() &&
3530          I.getMetadata(LLVMContext::MD_nontemporal) == nullptr &&
3531          I.getMetadata(LLVMContext::MD_invariant_load) == nullptr &&
3532          "Support volatile, non temporal, invariant for load_from_swift_error");
3533 
3534   const Value *SV = I.getOperand(0);
3535   Type *Ty = I.getType();
3536   AAMDNodes AAInfo;
3537   I.getAAMetadata(AAInfo);
3538   assert(!AA->pointsToConstantMemory(MemoryLocation(
3539              SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo)) &&
3540          "load_from_swift_error should not be constant memory");
3541 
3542   SmallVector<EVT, 4> ValueVTs;
3543   SmallVector<uint64_t, 4> Offsets;
3544   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
3545                   ValueVTs, &Offsets);
3546   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3547          "expect a single EVT for swifterror");
3548 
3549   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
3550   SDValue L = DAG.getCopyFromReg(
3551       getRoot(), getCurSDLoc(),
3552       FuncInfo.getOrCreateSwiftErrorVReg(FuncInfo.MBB, SV), ValueVTs[0]);
3553 
3554   setValue(&I, L);
3555 }
3556 
3557 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
3558   if (I.isAtomic())
3559     return visitAtomicStore(I);
3560 
3561   const Value *SrcV = I.getOperand(0);
3562   const Value *PtrV = I.getOperand(1);
3563 
3564   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3565   if (TLI.supportSwiftError()) {
3566     // Swifterror values can come from either a function parameter with
3567     // swifterror attribute or an alloca with swifterror attribute.
3568     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
3569       if (Arg->hasSwiftErrorAttr())
3570         return visitStoreToSwiftError(I);
3571     }
3572 
3573     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
3574       if (Alloca->isSwiftError())
3575         return visitStoreToSwiftError(I);
3576     }
3577   }
3578 
3579   SmallVector<EVT, 4> ValueVTs;
3580   SmallVector<uint64_t, 4> Offsets;
3581   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3582                   SrcV->getType(), ValueVTs, &Offsets);
3583   unsigned NumValues = ValueVTs.size();
3584   if (NumValues == 0)
3585     return;
3586 
3587   // Get the lowered operands. Note that we do this after
3588   // checking if NumResults is zero, because with zero results
3589   // the operands won't have values in the map.
3590   SDValue Src = getValue(SrcV);
3591   SDValue Ptr = getValue(PtrV);
3592 
3593   SDValue Root = getRoot();
3594   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3595   SDLoc dl = getCurSDLoc();
3596   EVT PtrVT = Ptr.getValueType();
3597   unsigned Alignment = I.getAlignment();
3598   AAMDNodes AAInfo;
3599   I.getAAMetadata(AAInfo);
3600 
3601   auto MMOFlags = MachineMemOperand::MONone;
3602   if (I.isVolatile())
3603     MMOFlags |= MachineMemOperand::MOVolatile;
3604   if (I.getMetadata(LLVMContext::MD_nontemporal) != nullptr)
3605     MMOFlags |= MachineMemOperand::MONonTemporal;
3606 
3607   // An aggregate store cannot wrap around the address space, so offsets to its
3608   // parts don't wrap either.
3609   SDNodeFlags Flags;
3610   Flags.setNoUnsignedWrap(true);
3611 
3612   unsigned ChainI = 0;
3613   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3614     // See visitLoad comments.
3615     if (ChainI == MaxParallelChains) {
3616       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3617                                   makeArrayRef(Chains.data(), ChainI));
3618       Root = Chain;
3619       ChainI = 0;
3620     }
3621     SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3622                               DAG.getConstant(Offsets[i], dl, PtrVT), Flags);
3623     SDValue St = DAG.getStore(
3624         Root, dl, SDValue(Src.getNode(), Src.getResNo() + i), Add,
3625         MachinePointerInfo(PtrV, Offsets[i]), Alignment, MMOFlags, AAInfo);
3626     Chains[ChainI] = St;
3627   }
3628 
3629   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3630                                   makeArrayRef(Chains.data(), ChainI));
3631   DAG.setRoot(StoreNode);
3632 }
3633 
3634 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
3635                                            bool IsCompressing) {
3636   SDLoc sdl = getCurSDLoc();
3637 
3638   auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3639                            unsigned& Alignment) {
3640     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
3641     Src0 = I.getArgOperand(0);
3642     Ptr = I.getArgOperand(1);
3643     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3644     Mask = I.getArgOperand(3);
3645   };
3646   auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3647                            unsigned& Alignment) {
3648     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
3649     Src0 = I.getArgOperand(0);
3650     Ptr = I.getArgOperand(1);
3651     Mask = I.getArgOperand(2);
3652     Alignment = 0;
3653   };
3654 
3655   Value  *PtrOperand, *MaskOperand, *Src0Operand;
3656   unsigned Alignment;
3657   if (IsCompressing)
3658     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3659   else
3660     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3661 
3662   SDValue Ptr = getValue(PtrOperand);
3663   SDValue Src0 = getValue(Src0Operand);
3664   SDValue Mask = getValue(MaskOperand);
3665 
3666   EVT VT = Src0.getValueType();
3667   if (!Alignment)
3668     Alignment = DAG.getEVTAlignment(VT);
3669 
3670   AAMDNodes AAInfo;
3671   I.getAAMetadata(AAInfo);
3672 
3673   MachineMemOperand *MMO =
3674     DAG.getMachineFunction().
3675     getMachineMemOperand(MachinePointerInfo(PtrOperand),
3676                           MachineMemOperand::MOStore,  VT.getStoreSize(),
3677                           Alignment, AAInfo);
3678   SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
3679                                          MMO, false /* Truncating */,
3680                                          IsCompressing);
3681   DAG.setRoot(StoreNode);
3682   setValue(&I, StoreNode);
3683 }
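
// For illustration, the two intrinsic shapes handled above (operand values
// hypothetical):
//   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v, <4 x i32>* %p,
//                                              i32 4, <4 x i1> %m)
//   call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, i32* %p,
//                                              <4 x i1> %m)
// The compressing form carries no alignment argument, so the EVT's default
// alignment is used.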
3684 
3685 // Get a uniform base for the Gather/Scatter intrinsic.
3686 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
3687 // We try to represent it as a base pointer + vector of indices.
3688 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
3689 // The first operand of the GEP may be a single pointer or a vector of pointers.
3690 // Example:
3691 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
3692 //  or
3693 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
3694 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
3695 //
3696 // When the first GEP operand is a single pointer, it is the uniform base we
3697 // are looking for. If the first operand of the GEP is a splat vector, we
3698 // extract the splat value and use it as the uniform base.
3699 // In all other cases the function returns 'false'.
3700 //
3701 static bool getUniformBase(const Value* &Ptr, SDValue& Base, SDValue& Index,
3702                            SelectionDAGBuilder* SDB) {
3703 
3704   SelectionDAG& DAG = SDB->DAG;
3705   LLVMContext &Context = *DAG.getContext();
3706 
3707   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
3708   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
3709   if (!GEP || GEP->getNumOperands() > 2)
3710     return false;
3711 
3712   const Value *GEPPtr = GEP->getPointerOperand();
3713   if (!GEPPtr->getType()->isVectorTy())
3714     Ptr = GEPPtr;
3715   else if (!(Ptr = getSplatValue(GEPPtr)))
3716     return false;
3717 
3718   Value *IndexVal = GEP->getOperand(1);
3719 
3720   // The operands of the GEP may be defined in another basic block.
3721   // In this case we'll not find nodes for the operands.
3722   if (!SDB->findValue(Ptr) || !SDB->findValue(IndexVal))
3723     return false;
3724 
3725   Base = SDB->getValue(Ptr);
3726   Index = SDB->getValue(IndexVal);
3727 
3728   // Suppress sign extension.
3729   if (SExtInst* Sext = dyn_cast<SExtInst>(IndexVal)) {
3730     if (SDB->findValue(Sext->getOperand(0))) {
3731       IndexVal = Sext->getOperand(0);
3732       Index = SDB->getValue(IndexVal);
3733     }
3734   }
3735   if (!Index.getValueType().isVector()) {
3736     unsigned GEPWidth = GEP->getType()->getVectorNumElements();
3737     EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
3738     Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
3739   }
3740   return true;
3741 }
3742 
3743 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
3744   SDLoc sdl = getCurSDLoc();
3745 
3746   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
3747   const Value *Ptr = I.getArgOperand(1);
3748   SDValue Src0 = getValue(I.getArgOperand(0));
3749   SDValue Mask = getValue(I.getArgOperand(3));
3750   EVT VT = Src0.getValueType();
3751   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
3752   if (!Alignment)
3753     Alignment = DAG.getEVTAlignment(VT);
3754   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3755 
3756   AAMDNodes AAInfo;
3757   I.getAAMetadata(AAInfo);
3758 
3759   SDValue Base;
3760   SDValue Index;
3761   const Value *BasePtr = Ptr;
3762   bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
3763 
3764   const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
3765   MachineMemOperand *MMO = DAG.getMachineFunction().
3766     getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
3767                          MachineMemOperand::MOStore,  VT.getStoreSize(),
3768                          Alignment, AAInfo);
3769   if (!UniformBase) {
3770     Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
3771     Index = getValue(Ptr);
3772   }
3773   SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index };
3774   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
3775                                          Ops, MMO);
3776   DAG.setRoot(Scatter);
3777   setValue(&I, Scatter);
3778 }
3779 
3780 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
3781   SDLoc sdl = getCurSDLoc();
3782 
3783   auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3784                            unsigned& Alignment) {
3785     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
3786     Ptr = I.getArgOperand(0);
3787     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
3788     Mask = I.getArgOperand(2);
3789     Src0 = I.getArgOperand(3);
3790   };
3791   auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3792                            unsigned& Alignment) {
3793     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
3794     Ptr = I.getArgOperand(0);
3795     Alignment = 0;
3796     Mask = I.getArgOperand(1);
3797     Src0 = I.getArgOperand(2);
3798   };
3799 
3800   Value  *PtrOperand, *MaskOperand, *Src0Operand;
3801   unsigned Alignment;
3802   if (IsExpanding)
3803     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3804   else
3805     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3806 
3807   SDValue Ptr = getValue(PtrOperand);
3808   SDValue Src0 = getValue(Src0Operand);
3809   SDValue Mask = getValue(MaskOperand);
3810 
3811   EVT VT = Src0.getValueType();
3812   if (!Alignment)
3813     Alignment = DAG.getEVTAlignment(VT);
3814 
3815   AAMDNodes AAInfo;
3816   I.getAAMetadata(AAInfo);
3817   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3818 
3819   // Do not serialize masked loads of constant memory with anything.
3820   bool AddToChain = !AA->pointsToConstantMemory(MemoryLocation(
3821       PtrOperand, DAG.getDataLayout().getTypeStoreSize(I.getType()), AAInfo));
3822   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
3823 
3824   MachineMemOperand *MMO =
3825     DAG.getMachineFunction().
3826     getMachineMemOperand(MachinePointerInfo(PtrOperand),
3827                           MachineMemOperand::MOLoad,  VT.getStoreSize(),
3828                           Alignment, AAInfo, Ranges);
3829 
3830   SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
3831                                    ISD::NON_EXTLOAD, IsExpanding);
3832   if (AddToChain) {
3833     SDValue OutChain = Load.getValue(1);
3834     DAG.setRoot(OutChain);
3835   }
3836   setValue(&I, Load);
3837 }
3838 
3839 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
3840   SDLoc sdl = getCurSDLoc();
3841 
3842   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
3843   const Value *Ptr = I.getArgOperand(0);
3844   SDValue Src0 = getValue(I.getArgOperand(3));
3845   SDValue Mask = getValue(I.getArgOperand(2));
3846 
3847   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3848   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3849   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
3850   if (!Alignment)
3851     Alignment = DAG.getEVTAlignment(VT);
3852 
3853   AAMDNodes AAInfo;
3854   I.getAAMetadata(AAInfo);
3855   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3856 
3857   SDValue Root = DAG.getRoot();
3858   SDValue Base;
3859   SDValue Index;
3860   const Value *BasePtr = Ptr;
3861   bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
3862   bool ConstantMemory = false;
3863   if (UniformBase &&
3864       AA->pointsToConstantMemory(MemoryLocation(
3865           BasePtr, DAG.getDataLayout().getTypeStoreSize(I.getType()),
3866           AAInfo))) {
3867     // Do not serialize (non-volatile) loads of constant memory with anything.
3868     Root = DAG.getEntryNode();
3869     ConstantMemory = true;
3870   }
3871 
3872   MachineMemOperand *MMO =
3873     DAG.getMachineFunction().
3874     getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
3875                          MachineMemOperand::MOLoad, VT.getStoreSize(),
3876                          Alignment, AAInfo, Ranges);
3877 
3878   if (!UniformBase) {
3879     Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
3880     Index = getValue(Ptr);
3881   }
3882   SDValue Ops[] = { Root, Src0, Mask, Base, Index };
3883   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
3884                                        Ops, MMO);
3885 
3886   SDValue OutChain = Gather.getValue(1);
3887   if (!ConstantMemory)
3888     PendingLoads.push_back(OutChain);
3889   setValue(&I, Gather);
3890 }
3891 
3892 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
3893   SDLoc dl = getCurSDLoc();
3894   AtomicOrdering SuccessOrder = I.getSuccessOrdering();
3895   AtomicOrdering FailureOrder = I.getFailureOrdering();
3896   SynchronizationScope Scope = I.getSynchScope();
3897 
3898   SDValue InChain = getRoot();
3899 
3900   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
3901   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
3902   SDValue L = DAG.getAtomicCmpSwap(
3903       ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
3904       getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
3905       getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
3906       /*Alignment=*/ 0, SuccessOrder, FailureOrder, Scope);
3907 
3908   SDValue OutChain = L.getValue(2);
3909 
3910   setValue(&I, L);
3911   DAG.setRoot(OutChain);
3912 }
3913 
3914 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
3915   SDLoc dl = getCurSDLoc();
3916   ISD::NodeType NT;
3917   switch (I.getOperation()) {
3918   default: llvm_unreachable("Unknown atomicrmw operation");
3919   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
3920   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
3921   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
3922   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
3923   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
3924   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
3925   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
3926   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
3927   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
3928   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
3929   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
3930   }
3931   AtomicOrdering Order = I.getOrdering();
3932   SynchronizationScope Scope = I.getSynchScope();
3933 
3934   SDValue InChain = getRoot();
3935 
3936   SDValue L =
3937     DAG.getAtomic(NT, dl,
3938                   getValue(I.getValOperand()).getSimpleValueType(),
3939                   InChain,
3940                   getValue(I.getPointerOperand()),
3941                   getValue(I.getValOperand()),
3942                   I.getPointerOperand(),
3943                   /*Alignment=*/ 0, Order, Scope);
3944 
3945   SDValue OutChain = L.getValue(1);
3946 
3947   setValue(&I, L);
3948   DAG.setRoot(OutChain);
3949 }
3950 
3951 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
3952   SDLoc dl = getCurSDLoc();
3953   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3954   SDValue Ops[3];
3955   Ops[0] = getRoot();
3956   Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
3957                            TLI.getFenceOperandTy(DAG.getDataLayout()));
3958   Ops[2] = DAG.getConstant(I.getSynchScope(), dl,
3959                            TLI.getFenceOperandTy(DAG.getDataLayout()));
3960   DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
3961 }
3962 
3963 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
3964   SDLoc dl = getCurSDLoc();
3965   AtomicOrdering Order = I.getOrdering();
3966   SynchronizationScope Scope = I.getSynchScope();
3967 
3968   SDValue InChain = getRoot();
3969 
3970   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3971   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3972 
3973   if (I.getAlignment() < VT.getSizeInBits() / 8)
3974     report_fatal_error("Cannot generate unaligned atomic load");
3975 
3976   MachineMemOperand *MMO =
3977       DAG.getMachineFunction().
3978       getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3979                            MachineMemOperand::MOVolatile |
3980                            MachineMemOperand::MOLoad,
3981                            VT.getStoreSize(),
3982                            I.getAlignment() ? I.getAlignment() :
3983                                               DAG.getEVTAlignment(VT),
3984                            AAMDNodes(), nullptr, Scope, Order);
3985 
3986   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
3987   SDValue L =
3988       DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
3989                     getValue(I.getPointerOperand()), MMO);
3990 
3991   SDValue OutChain = L.getValue(1);
3992 
3993   setValue(&I, L);
3994   DAG.setRoot(OutChain);
3995 }
3996 
3997 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
3998   SDLoc dl = getCurSDLoc();
3999 
4000   AtomicOrdering Order = I.getOrdering();
4001   SynchronizationScope Scope = I.getSynchScope();
4002 
4003   SDValue InChain = getRoot();
4004 
4005   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4006   EVT VT =
4007       TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4008 
4009   if (I.getAlignment() < VT.getSizeInBits() / 8)
4010     report_fatal_error("Cannot generate unaligned atomic store");
4011 
4012   SDValue OutChain =
4013     DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
4014                   InChain,
4015                   getValue(I.getPointerOperand()),
4016                   getValue(I.getValueOperand()),
4017                   I.getPointerOperand(), I.getAlignment(),
4018                   Order, Scope);
4019 
4020   DAG.setRoot(OutChain);
4021 }
4022 
4023 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4024 /// node.
4025 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4026                                                unsigned Intrinsic) {
4027   // Ignore the callsite's attributes. A specific call site may be marked with
4028   // readnone, but the lowering code will expect the chain based on the
4029   // definition.
4030   const Function *F = I.getCalledFunction();
4031   bool HasChain = !F->doesNotAccessMemory();
4032   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4033 
4034   // Build the operand list.
4035   SmallVector<SDValue, 8> Ops;
4036   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4037     if (OnlyLoad) {
4038       // We don't need to serialize loads against other loads.
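           // (DAG.getRoot() returns the current root without flushing the
           // PendingLoads queue, unlike getRoot(), so this node may still
           // be reordered relative to other pending loads.)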
4039       Ops.push_back(DAG.getRoot());
4040     } else {
4041       Ops.push_back(getRoot());
4042     }
4043   }
4044 
4045   // Info is set by getTgtMemIntrinsic.
4046   TargetLowering::IntrinsicInfo Info;
4047   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4048   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
4049 
4050   // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4051   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4052       Info.opc == ISD::INTRINSIC_W_CHAIN)
4053     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4054                                         TLI.getPointerTy(DAG.getDataLayout())));
4055 
4056   // Add all operands of the call to the operand list.
4057   for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4058     SDValue Op = getValue(I.getArgOperand(i));
4059     Ops.push_back(Op);
4060   }
4061 
4062   SmallVector<EVT, 4> ValueVTs;
4063   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4064 
4065   if (HasChain)
4066     ValueVTs.push_back(MVT::Other);
4067 
4068   SDVTList VTs = DAG.getVTList(ValueVTs);
4069 
4070   // Create the node.
4071   SDValue Result;
4072   if (IsTgtIntrinsic) {
4073     // This is a target intrinsic that touches memory.
4074     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(),
4075                                      VTs, Ops, Info.memVT,
4076                                    MachinePointerInfo(Info.ptrVal, Info.offset),
4077                                      Info.align, Info.vol,
4078                                      Info.readMem, Info.writeMem, Info.size);
4079   } else if (!HasChain) {
4080     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4081   } else if (!I.getType()->isVoidTy()) {
4082     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4083   } else {
4084     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4085   }
4086 
4087   if (HasChain) {
4088     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4089     if (OnlyLoad)
4090       PendingLoads.push_back(Chain);
4091     else
4092       DAG.setRoot(Chain);
4093   }
4094 
4095   if (!I.getType()->isVoidTy()) {
4096     if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4097       EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4098       Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4099     } else
4100       Result = lowerRangeToAssertZExt(DAG, I, Result);
4101 
4102     setValue(&I, Result);
4103   }
4104 }
4105 
4106 /// GetSignificand - Get the significand and build it into a floating-point
4107 /// number with exponent of 1:
4108 ///
4109 ///   Op = (Op & 0x007fffff) | 0x3f800000;
4110 ///
4111 /// where Op is the i32 bit pattern of the floating-point value.
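     ///
     /// For example (illustrative): for Op = 0x40200000, the bits of 2.5f,
     /// the result is 0x3fa00000, i.e. 1.25f, the significand of 2.5f.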
4112 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4113   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4114                            DAG.getConstant(0x007fffff, dl, MVT::i32));
4115   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4116                            DAG.getConstant(0x3f800000, dl, MVT::i32));
4117   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4118 }
4119 
4120 /// GetExponent - Get the exponent:
4121 ///
4122 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4123 ///
4124 /// where Op is the i32 bit pattern of the floating-point value.
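     ///
     /// For example (illustrative): for Op = 0x40200000, the bits of 2.5f,
     /// the exponent field is 0x80 = 128, and 128 - 127 = 1 yields 1.0f.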
4125 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4126                            const TargetLowering &TLI, const SDLoc &dl) {
4127   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4128                            DAG.getConstant(0x7f800000, dl, MVT::i32));
4129   SDValue t1 = DAG.getNode(
4130       ISD::SRL, dl, MVT::i32, t0,
4131       DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4132   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4133                            DAG.getConstant(127, dl, MVT::i32));
4134   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4135 }
4136 
4137 /// getF32Constant - Get 32-bit floating point constant.
4138 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4139                               const SDLoc &dl) {
4140   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4141                            MVT::f32);
4142 }
4143 
4144 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4145                                        SelectionDAG &DAG) {
4146   // TODO: What fast-math-flags should be set on the floating-point nodes?
4147 
4148   //   IntegerPartOfX = (int32_t)t0;
4149   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4150 
4151   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
4152   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4153   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4154 
4155   //   IntegerPartOfX <<= 23;
4156   IntegerPartOfX = DAG.getNode(
4157       ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4158       DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4159                                   DAG.getDataLayout())));
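       // Note: bits <30:23> form the IEEE-754 single-precision exponent
       // field, so the integer-domain ADD at the end of this function
       // effectively scales the result by 2^IntegerPartOfX.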
4160 
4161   SDValue TwoToFractionalPartOfX;
4162   if (LimitFloatPrecision <= 6) {
4163     // For floating-point precision of 6:
4164     //
4165     //   TwoToFractionalPartOfX =
4166     //     0.997535578f +
4167     //       (0.735607626f + 0.252464424f * x) * x;
4168     //
4169     // error 0.0144103317, which is 6 bits
4170     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4171                              getF32Constant(DAG, 0x3e814304, dl));
4172     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4173                              getF32Constant(DAG, 0x3f3c50c8, dl));
4174     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4175     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4176                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
4177   } else if (LimitFloatPrecision <= 12) {
4178     // For floating-point precision of 12:
4179     //
4180     //   TwoToFractionalPartOfX =
4181     //     0.999892986f +
4182     //       (0.696457318f +
4183     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4184     //
4185     // error 0.000107046256, which is 13 to 14 bits
4186     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4187                              getF32Constant(DAG, 0x3da235e3, dl));
4188     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4189                              getF32Constant(DAG, 0x3e65b8f3, dl));
4190     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4191     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4192                              getF32Constant(DAG, 0x3f324b07, dl));
4193     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4194     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4195                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
4196   } else { // LimitFloatPrecision <= 18
4197     // For floating-point precision of 18:
4198     //
4199     //   TwoToFractionalPartOfX =
4200     //     0.999999982f +
4201     //       (0.693148872f +
4202     //         (0.240227044f +
4203     //           (0.554906021e-1f +
4204     //             (0.961591928e-2f +
4205     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4206     // error 2.47208000*10^(-7), which is better than 18 bits
4207     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4208                              getF32Constant(DAG, 0x3924b03e, dl));
4209     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4210                              getF32Constant(DAG, 0x3ab24b87, dl));
4211     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4212     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4213                              getF32Constant(DAG, 0x3c1d8c17, dl));
4214     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4215     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4216                              getF32Constant(DAG, 0x3d634a1d, dl));
4217     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4218     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4219                              getF32Constant(DAG, 0x3e75fe14, dl));
4220     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4221     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4222                               getF32Constant(DAG, 0x3f317234, dl));
4223     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4224     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4225                                          getF32Constant(DAG, 0x3f800000, dl));
4226   }
4227 
4228   // Add the exponent into the result in integer domain.
4229   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4230   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4231                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4232 }
4233 
4234 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
4235 /// limited-precision mode.
4236 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4237                          const TargetLowering &TLI) {
4238   if (Op.getValueType() == MVT::f32 &&
4239       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4240 
4241     // Put the exponent in the right bit position for later addition to the
4242     // final result:
4243     //
4244     //   #define LOG2OFe 1.4426950f
4245     //   t0 = Op * LOG2OFe
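         //
         // This works because exp(x) = 2^(x * log2(e)), reducing exp to the
         // exp2 expansion implemented above.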
4246 
4247     // TODO: What fast-math-flags should be set here?
4248     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4249                              getF32Constant(DAG, 0x3fb8aa3b, dl));
4250     return getLimitedPrecisionExp2(t0, dl, DAG);
4251   }
4252 
4253   // No special expansion.
4254   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4255 }
4256 
4257 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4258 /// limited-precision mode.
4259 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4260                          const TargetLowering &TLI) {
4261 
4262   // TODO: What fast-math-flags should be set on the floating-point nodes?
4263 
4264   if (Op.getValueType() == MVT::f32 &&
4265       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4266     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4267 
4268     // Scale the exponent by log(2) [0.69314718f].
4269     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4270     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4271                                         getF32Constant(DAG, 0x3f317218, dl));
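         // Writing Op = 2^exponent * mantissa with mantissa in [1,2) gives
         // log(Op) = exponent * log(2) + log(mantissa), so only the
         // log(mantissa) term still needs a polynomial approximation.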
4272 
4273     // Get the significand and build it into a floating-point number with
4274     // exponent of 1.
4275     SDValue X = GetSignificand(DAG, Op1, dl);
4276 
4277     SDValue LogOfMantissa;
4278     if (LimitFloatPrecision <= 6) {
4279       // For floating-point precision of 6:
4280       //
4281       //   LogOfMantissa =
4282       //     -1.1609546f +
4283       //       (1.4034025f - 0.23903021f * x) * x;
4284       //
4285       // error 0.0034276066, which is better than 8 bits
4286       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4287                                getF32Constant(DAG, 0xbe74c456, dl));
4288       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4289                                getF32Constant(DAG, 0x3fb3a2b1, dl));
4290       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4291       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4292                                   getF32Constant(DAG, 0x3f949a29, dl));
4293     } else if (LimitFloatPrecision <= 12) {
4294       // For floating-point precision of 12:
4295       //
4296       //   LogOfMantissa =
4297       //     -1.7417939f +
4298       //       (2.8212026f +
4299       //         (-1.4699568f +
4300       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4301       //
4302       // error 0.000061011436, which is 14 bits
4303       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4304                                getF32Constant(DAG, 0xbd67b6d6, dl));
4305       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4306                                getF32Constant(DAG, 0x3ee4f4b8, dl));
4307       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4308       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4309                                getF32Constant(DAG, 0x3fbc278b, dl));
4310       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4311       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4312                                getF32Constant(DAG, 0x40348e95, dl));
4313       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4314       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4315                                   getF32Constant(DAG, 0x3fdef31a, dl));
4316     } else { // LimitFloatPrecision <= 18
4317       // For floating-point precision of 18:
4318       //
4319       //   LogOfMantissa =
4320       //     -2.1072184f +
4321       //       (4.2372794f +
4322       //         (-3.7029485f +
4323       //           (2.2781945f +
4324       //             (-0.87823314f +
4325       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4326       //
4327       // error 0.0000023660568, which is better than 18 bits
4328       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4329                                getF32Constant(DAG, 0xbc91e5ac, dl));
4330       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4331                                getF32Constant(DAG, 0x3e4350aa, dl));
4332       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4333       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4334                                getF32Constant(DAG, 0x3f60d3e3, dl));
4335       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4336       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4337                                getF32Constant(DAG, 0x4011cdf0, dl));
4338       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4339       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4340                                getF32Constant(DAG, 0x406cfd1c, dl));
4341       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4342       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4343                                getF32Constant(DAG, 0x408797cb, dl));
4344       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4345       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4346                                   getF32Constant(DAG, 0x4006dcab, dl));
4347     }
4348 
4349     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4350   }
4351 
4352   // No special expansion.
4353   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4354 }
4355 
4356 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4357 /// limited-precision mode.
4358 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4359                           const TargetLowering &TLI) {
4360 
4361   // TODO: What fast-math-flags should be set on the floating-point nodes?
4362 
4363   if (Op.getValueType() == MVT::f32 &&
4364       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4365     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4366 
4367     // Get the exponent.
4368     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
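         // Since log2(Op) = exponent + log2(mantissa), the exponent needs
         // no scaling before the mantissa approximation is added to it.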
4369 
4370     // Get the significand and build it into a floating-point number with
4371     // exponent of 1.
4372     SDValue X = GetSignificand(DAG, Op1, dl);
4373 
4374     // Different possible minimax approximations of the significand in
4375     // floating point, for various degrees of accuracy over [1,2].
4376     SDValue Log2ofMantissa;
4377     if (LimitFloatPrecision <= 6) {
4378       // For floating-point precision of 6:
4379       //
4380       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
4381       //
4382       // error 0.0049451742, which is more than 7 bits
4383       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4384                                getF32Constant(DAG, 0xbeb08fe0, dl));
4385       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4386                                getF32Constant(DAG, 0x40019463, dl));
4387       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4388       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4389                                    getF32Constant(DAG, 0x3fd6633d, dl));
4390     } else if (LimitFloatPrecision <= 12) {
4391       // For floating-point precision of 12:
4392       //
4393       //   Log2ofMantissa =
4394       //     -2.51285454f +
4395       //       (4.07009056f +
4396       //         (-2.12067489f +
4397       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
4398       //
4399       // error 0.0000876136000, which is better than 13 bits
4400       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4401                                getF32Constant(DAG, 0xbda7262e, dl));
4402       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4403                                getF32Constant(DAG, 0x3f25280b, dl));
4404       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4405       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4406                                getF32Constant(DAG, 0x4007b923, dl));
4407       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4408       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4409                                getF32Constant(DAG, 0x40823e2f, dl));
4410       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4411       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4412                                    getF32Constant(DAG, 0x4020d29c, dl));
4413     } else { // LimitFloatPrecision <= 18
4414       // For floating-point precision of 18:
4415       //
4416       //   Log2ofMantissa =
4417       //     -3.0400495f +
4418       //       (6.1129976f +
4419       //         (-5.3420409f +
4420       //           (3.2865683f +
4421       //             (-1.2669343f +
4422       //               (0.27515199f -
4423       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
4424       //
4425       // error 0.0000018516, which is better than 18 bits
4426       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4427                                getF32Constant(DAG, 0xbcd2769e, dl));
4428       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4429                                getF32Constant(DAG, 0x3e8ce0b9, dl));
4430       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4431       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4432                                getF32Constant(DAG, 0x3fa22ae7, dl));
4433       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4434       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4435                                getF32Constant(DAG, 0x40525723, dl));
4436       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4437       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4438                                getF32Constant(DAG, 0x40aaf200, dl));
4439       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4440       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4441                                getF32Constant(DAG, 0x40c39dad, dl));
4442       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4443       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4444                                    getF32Constant(DAG, 0x4042902c, dl));
4445     }
4446 
4447     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
4448   }
4449 
4450   // No special expansion.
4451   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
4452 }
4453 
4454 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
4455 /// limited-precision mode.
4456 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4457                            const TargetLowering &TLI) {
4458 
4459   // TODO: What fast-math-flags should be set on the floating-point nodes?
4460 
4461   if (Op.getValueType() == MVT::f32 &&
4462       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4463     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4464 
4465     // Scale the exponent by log10(2) [0.30102999f].
4466     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4467     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4468                                         getF32Constant(DAG, 0x3e9a209a, dl));
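         // As in expandLog: log10(Op) = exponent * log10(2) + log10(mantissa).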
4469 
4470     // Get the significand and build it into a floating-point number with
4471     // exponent of 1.
4472     SDValue X = GetSignificand(DAG, Op1, dl);
4473 
4474     SDValue Log10ofMantissa;
4475     if (LimitFloatPrecision <= 6) {
4476       // For floating-point precision of 6:
4477       //
4478       //   Log10ofMantissa =
4479       //     -0.50419619f +
4480       //       (0.60948995f - 0.10380950f * x) * x;
4481       //
4482       // error 0.0014886165, which is 6 bits
4483       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4484                                getF32Constant(DAG, 0xbdd49a13, dl));
4485       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4486                                getF32Constant(DAG, 0x3f1c0789, dl));
4487       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4488       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4489                                     getF32Constant(DAG, 0x3f011300, dl));
4490     } else if (LimitFloatPrecision <= 12) {
4491       // For floating-point precision of 12:
4492       //
4493       //   Log10ofMantissa =
4494       //     -0.64831180f +
4495       //       (0.91751397f +
4496       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
4497       //
4498       // error 0.00019228036, which is better than 12 bits
4499       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4500                                getF32Constant(DAG, 0x3d431f31, dl));
4501       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4502                                getF32Constant(DAG, 0x3ea21fb2, dl));
4503       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4504       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4505                                getF32Constant(DAG, 0x3f6ae232, dl));
4506       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4507       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4508                                     getF32Constant(DAG, 0x3f25f7c3, dl));
4509     } else { // LimitFloatPrecision <= 18
4510       // For floating-point precision of 18:
4511       //
4512       //   Log10ofMantissa =
4513       //     -0.84299375f +
4514       //       (1.5327582f +
4515       //         (-1.0688956f +
4516       //           (0.49102474f +
4517       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
4518       //
4519       // error 0.0000037995730, which is better than 18 bits
4520       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4521                                getF32Constant(DAG, 0x3c5d51ce, dl));
4522       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4523                                getF32Constant(DAG, 0x3e00685a, dl));
4524       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4525       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4526                                getF32Constant(DAG, 0x3efb6798, dl));
4527       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4528       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4529                                getF32Constant(DAG, 0x3f88d192, dl));
4530       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4531       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4532                                getF32Constant(DAG, 0x3fc4316c, dl));
4533       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4534       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
4535                                     getF32Constant(DAG, 0x3f57ce70, dl));
4536     }
4537 
4538     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
4539   }
4540 
4541   // No special expansion.
4542   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
4543 }
4544 
4545 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
4546 /// limited-precision mode.
4547 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4548                           const TargetLowering &TLI) {
4549   if (Op.getValueType() == MVT::f32 &&
4550       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
4551     return getLimitedPrecisionExp2(Op, dl, DAG);
4552 
4553   // No special expansion.
4554   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
4555 }
4556 
4557 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
4558 /// limited-precision mode when the base is exactly 10.0f.
4559 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
4560                          SelectionDAG &DAG, const TargetLowering &TLI) {
4561   bool IsExp10 = false;
4562   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
4563       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4564     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
4565       APFloat Ten(10.0f);
4566       IsExp10 = LHSC->isExactlyValue(Ten);
4567     }
4568   }
4569 
4570   // TODO: What fast-math-flags should be set on the FMUL node?
4571   if (IsExp10) {
4572     // Put the exponent in the right bit position for later addition to the
4573     // final result:
4574     //
4575     //   #define LOG2OF10 3.3219281f
4576     //   t0 = Op * LOG2OF10;
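         //
         // This relies on pow(10, x) = 2^(x * log2(10)); 0x40549a78 below is
         // the bit pattern of log2(10) = 3.3219281f.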
4577     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
4578                              getF32Constant(DAG, 0x40549a78, dl));
4579     return getLimitedPrecisionExp2(t0, dl, DAG);
4580   }
4581 
4582   // No special expansion.
4583   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
4584 }
4585 
4586 
4587 /// ExpandPowI - Expand a llvm.powi intrinsic.
4588 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
4589                           SelectionDAG &DAG) {
4590   // If RHS is a constant, we can expand this out to a multiplication tree,
4591   // otherwise we end up lowering to a call to __powidf2 (for example).  When
4592   // optimizing for size, we only want to do this if the expansion would produce
4593   // a small number of multiplies, otherwise we do the full expansion.
4594   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4595     // Get the exponent as a positive value.
4596     unsigned Val = RHSC->getSExtValue();
4597     if ((int)Val < 0) Val = -Val;
4598 
4599     // powi(x, 0) -> 1.0
4600     if (Val == 0)
4601       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
4602 
4603     const Function *F = DAG.getMachineFunction().getFunction();
4604     if (!F->optForSize() ||
4605         // If optimizing for size, don't insert too many multiplies.
4606         // This inserts up to 5 multiplies.
4607         countPopulation(Val) + Log2_32(Val) < 7) {
4608       // We use the simple binary decomposition method to generate the multiply
4609       // sequence.  There are more optimal ways to do this (for example,
4610       // powi(x,15) generates one more multiply than it should), but this has
4611       // the benefit of being both really simple and much better than a libcall.
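           // For example (illustrative), powi(x, 13) with 13 = 0b1101 builds
           // the squares x^2, x^4, x^8 and multiplies together the powers
           // whose bit is set: x * x^4 * x^8 = x^13.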
4612       SDValue Res;  // Logically starts equal to 1.0
4613       SDValue CurSquare = LHS;
4614       // TODO: Intrinsics should have fast-math-flags that propagate to these
4615       // nodes.
4616       while (Val) {
4617         if (Val & 1) {
4618           if (Res.getNode())
4619             Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
4620           else
4621             Res = CurSquare;  // 1.0*CurSquare.
4622         }
4623 
4624         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
4625                                 CurSquare, CurSquare);
4626         Val >>= 1;
4627       }
4628 
4629       // If the original was negative, invert the result, producing 1/(x*x*x).
4630       if (RHSC->getSExtValue() < 0)
4631         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
4632                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
4633       return Res;
4634     }
4635   }
4636 
4637   // Otherwise, expand to a libcall.
4638   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
4639 }
4640 
4641 // getUnderlyingArgReg - Find underlying register used for a truncated or
4642 // bitcasted argument.
4643 static unsigned getUnderlyingArgReg(const SDValue &N) {
4644   switch (N.getOpcode()) {
4645   case ISD::CopyFromReg:
4646     return cast<RegisterSDNode>(N.getOperand(1))->getReg();
4647   case ISD::BITCAST:
4648   case ISD::AssertZext:
4649   case ISD::AssertSext:
4650   case ISD::TRUNCATE:
4651     return getUnderlyingArgReg(N.getOperand(0));
4652   default:
4653     return 0;
4654   }
4655 }
4656 
4657 /// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
4658 /// argument, create the corresponding DBG_VALUE machine instruction for it now.
4659 /// At the end of instruction selection, they will be inserted to the entry BB.
4660 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
4661     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
4662     DILocation *DL, int64_t Offset, bool IsDbgDeclare, const SDValue &N) {
4663   const Argument *Arg = dyn_cast<Argument>(V);
4664   if (!Arg)
4665     return false;
4666 
4667   MachineFunction &MF = DAG.getMachineFunction();
4668   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
4669 
4670   // Ignore inlined function arguments here.
4671   //
4672   // FIXME: Should we be checking DL->inlinedAt() to determine this?
4673   if (!Variable->getScope()->getSubprogram()->describes(MF.getFunction()))
4674     return false;
4675 
4676   bool IsIndirect = false;
4677   Optional<MachineOperand> Op;
4678   // Some arguments' frame index is recorded during argument lowering.
4679   if (int FI = FuncInfo.getArgumentFrameIndex(Arg))
4680     Op = MachineOperand::CreateFI(FI);
4681 
4682   if (!Op && N.getNode()) {
4683     unsigned Reg = getUnderlyingArgReg(N);
4684     if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
4685       MachineRegisterInfo &RegInfo = MF.getRegInfo();
4686       unsigned PR = RegInfo.getLiveInPhysReg(Reg);
4687       if (PR)
4688         Reg = PR;
4689     }
4690     if (Reg) {
4691       Op = MachineOperand::CreateReg(Reg, false);
4692       IsIndirect = IsDbgDeclare;
4693     }
4694   }
4695 
4696   if (!Op) {
4697     // Check if ValueMap has reg number.
4698     DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
4699     if (VMI != FuncInfo.ValueMap.end()) {
4700       Op = MachineOperand::CreateReg(VMI->second, false);
4701       IsIndirect = IsDbgDeclare;
4702     }
4703   }
4704 
4705   if (!Op && N.getNode())
4706     // Check if frame index is available.
4707     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
4708       if (FrameIndexSDNode *FINode =
4709           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
4710         Op = MachineOperand::CreateFI(FINode->getIndex());
4711 
4712   if (!Op)
4713     return false;
4714 
4715   assert(Variable->isValidLocationForIntrinsic(DL) &&
4716          "Expected inlined-at fields to agree");
4717   if (Op->isReg())
4718     FuncInfo.ArgDbgValues.push_back(
4719         BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
4720                 Op->getReg(), Offset, Variable, Expr));
4721   else
4722     FuncInfo.ArgDbgValues.push_back(
4723         BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE))
4724             .add(*Op)
4725             .addImm(Offset)
4726             .addMetadata(Variable)
4727             .addMetadata(Expr));
4728 
4729   return true;
4730 }
4731 
4732 /// Return the appropriate SDDbgValue based on N.
4733 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
4734                                              DILocalVariable *Variable,
4735                                              DIExpression *Expr, int64_t Offset,
4736                                              const DebugLoc &dl,
4737                                              unsigned DbgSDNodeOrder) {
4738   SDDbgValue *SDV;
4739   auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode());
4740   if (FISDN && Expr->startsWithDeref()) {
4741     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
4742     // stack slot locations as such instead of as indirectly addressed
4743     // locations.
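         // (E.g. an expression that starts with DW_OP_deref on a frame-index
         // node drops the leading deref and describes the stack slot
         // location directly.)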
4744     ArrayRef<uint64_t> TrailingElements(Expr->elements_begin() + 1,
4745                                         Expr->elements_end());
4746     DIExpression *DerefedDIExpr =
4747         DIExpression::get(*DAG.getContext(), TrailingElements);
4748     int FI = FISDN->getIndex();
4749     SDV = DAG.getFrameIndexDbgValue(Variable, DerefedDIExpr, FI, 0, dl,
4750                                     DbgSDNodeOrder);
4751   } else {
4752     SDV = DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(), false,
4753                           Offset, dl, DbgSDNodeOrder);
4754   }
4755   return SDV;
4756 }
4757 
4758 // VisualStudio defines setjmp as _setjmp
4759 #if defined(_MSC_VER) && defined(setjmp) && \
4760                          !defined(setjmp_undefined_for_msvc)
4761 #  pragma push_macro("setjmp")
4762 #  undef setjmp
4763 #  define setjmp_undefined_for_msvc
4764 #endif
4765 
4766 /// Lower the call to the specified intrinsic function. If we want to emit this
4767 /// as a call to a named external function, return the name. Otherwise, lower it
4768 /// and return null.
4769 const char *
4770 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
4771   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4772   SDLoc sdl = getCurSDLoc();
4773   DebugLoc dl = getCurDebugLoc();
4774   SDValue Res;
4775 
4776   switch (Intrinsic) {
4777   default:
4778     // By default, turn this into a target intrinsic node.
4779     visitTargetIntrinsic(I, Intrinsic);
4780     return nullptr;
4781   case Intrinsic::vastart:  visitVAStart(I); return nullptr;
4782   case Intrinsic::vaend:    visitVAEnd(I); return nullptr;
4783   case Intrinsic::vacopy:   visitVACopy(I); return nullptr;
4784   case Intrinsic::returnaddress:
4785     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
4786                              TLI.getPointerTy(DAG.getDataLayout()),
4787                              getValue(I.getArgOperand(0))));
4788     return nullptr;
4789   case Intrinsic::addressofreturnaddress:
4790     setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
4791                              TLI.getPointerTy(DAG.getDataLayout())));
4792     return nullptr;
4793   case Intrinsic::frameaddress:
4794     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
4795                              TLI.getPointerTy(DAG.getDataLayout()),
4796                              getValue(I.getArgOperand(0))));
4797     return nullptr;
4798   case Intrinsic::read_register: {
4799     Value *Reg = I.getArgOperand(0);
4800     SDValue Chain = getRoot();
4801     SDValue RegName =
4802         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
4803     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4804     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
4805       DAG.getVTList(VT, MVT::Other), Chain, RegName);
4806     setValue(&I, Res);
4807     DAG.setRoot(Res.getValue(1));
4808     return nullptr;
4809   }
4810   case Intrinsic::write_register: {
4811     Value *Reg = I.getArgOperand(0);
4812     Value *RegValue = I.getArgOperand(1);
4813     SDValue Chain = getRoot();
4814     SDValue RegName =
4815         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
4816     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
4817                             RegName, getValue(RegValue)));
4818     return nullptr;
4819   }
4820   case Intrinsic::setjmp:
4821     return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
4822   case Intrinsic::longjmp:
4823     return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
4824   case Intrinsic::memcpy: {
4825     SDValue Op1 = getValue(I.getArgOperand(0));
4826     SDValue Op2 = getValue(I.getArgOperand(1));
4827     SDValue Op3 = getValue(I.getArgOperand(2));
4828     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4829     if (!Align)
4830       Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment.
4831     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4832     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
4833     SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4834                                false, isTC,
4835                                MachinePointerInfo(I.getArgOperand(0)),
4836                                MachinePointerInfo(I.getArgOperand(1)));
4837     updateDAGForMaybeTailCall(MC);
4838     return nullptr;
4839   }
4840   case Intrinsic::memset: {
4841     SDValue Op1 = getValue(I.getArgOperand(0));
4842     SDValue Op2 = getValue(I.getArgOperand(1));
4843     SDValue Op3 = getValue(I.getArgOperand(2));
4844     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4845     if (!Align)
4846       Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment.
4847     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4848     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
4849     SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4850                                isTC, MachinePointerInfo(I.getArgOperand(0)));
4851     updateDAGForMaybeTailCall(MS);
4852     return nullptr;
4853   }
4854   case Intrinsic::memmove: {
4855     SDValue Op1 = getValue(I.getArgOperand(0));
4856     SDValue Op2 = getValue(I.getArgOperand(1));
4857     SDValue Op3 = getValue(I.getArgOperand(2));
4858     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4859     if (!Align)
4860       Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment.
4861     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4862     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
4863     SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4864                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
4865                                 MachinePointerInfo(I.getArgOperand(1)));
4866     updateDAGForMaybeTailCall(MM);
4867     return nullptr;
4868   }
4869   case Intrinsic::memcpy_element_atomic: {
4870     SDValue Dst = getValue(I.getArgOperand(0));
4871     SDValue Src = getValue(I.getArgOperand(1));
4872     SDValue NumElements = getValue(I.getArgOperand(2));
4873     SDValue ElementSize = getValue(I.getArgOperand(3));
4874 
4875     // Emit a library call.
4876     TargetLowering::ArgListTy Args;
4877     TargetLowering::ArgListEntry Entry;
4878     Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
4879     Entry.Node = Dst;
4880     Args.push_back(Entry);
4881 
4882     Entry.Node = Src;
4883     Args.push_back(Entry);
4884 
4885     Entry.Ty = I.getArgOperand(2)->getType();
4886     Entry.Node = NumElements;
4887     Args.push_back(Entry);
4888 
4889     Entry.Ty = Type::getInt32Ty(*DAG.getContext());
4890     Entry.Node = ElementSize;
4891     Args.push_back(Entry);
4892 
4893     uint64_t ElementSizeConstant =
4894         cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4895     RTLIB::Libcall LibraryCall =
4896         RTLIB::getMEMCPY_ELEMENT_ATOMIC(ElementSizeConstant);
4897     if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
4898       report_fatal_error("Unsupported element size");
4899 
4900     TargetLowering::CallLoweringInfo CLI(DAG);
4901     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
4902         TLI.getLibcallCallingConv(LibraryCall),
4903         Type::getVoidTy(*DAG.getContext()),
4904         DAG.getExternalSymbol(TLI.getLibcallName(LibraryCall),
4905                               TLI.getPointerTy(DAG.getDataLayout())),
4906         std::move(Args));
4907 
4908     std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
4909     DAG.setRoot(CallResult.second);
4910     return nullptr;
4911   }
4912   case Intrinsic::dbg_declare: {
4913     const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
4914     DILocalVariable *Variable = DI.getVariable();
4915     DIExpression *Expression = DI.getExpression();
4916     const Value *Address = DI.getAddress();
4917     assert(Variable && "Missing variable");
4918     if (!Address) {
4919       DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4920       return nullptr;
4921     }
4922 
4923     // Check if address has undef value.
4924     if (isa<UndefValue>(Address) ||
4925         (Address->use_empty() && !isa<Argument>(Address))) {
4926       DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4927       return nullptr;
4928     }
4929 
4930     SDValue &N = NodeMap[Address];
4931     if (!N.getNode() && isa<Argument>(Address))
4932       // Check unused arguments map.
4933       N = UnusedArgNodeMap[Address];
4934     SDDbgValue *SDV;
4935     if (N.getNode()) {
4936       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
4937         Address = BCI->getOperand(0);
4938       // Parameters are handled specially.
4939       bool isParameter = Variable->isParameter() || isa<Argument>(Address);
4940       auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
4941       if (isParameter && FINode) {
4942         // Byval parameter. We have a frame index at this point.
4943         SDV = DAG.getFrameIndexDbgValue(Variable, Expression,
4944                                         FINode->getIndex(), 0, dl, SDNodeOrder);
4945       } else if (isa<Argument>(Address)) {
4946         // Address is an argument, so try to emit its dbg value using
4947         // virtual register info from the FuncInfo.ValueMap.
4948         EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, true, N);
4949         return nullptr;
4950       } else {
4951         SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
4952                               true, 0, dl, SDNodeOrder);
4953       }
4954       DAG.AddDbgValue(SDV, N.getNode(), isParameter);
4955     } else {
4956       // If Address is an argument then try to emit its dbg value using
4957       // virtual register info from the FuncInfo.ValueMap.
4958       if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, true,
4959                                     N)) {
4960         // If the variable is pinned by an alloca in a dominating BB,
4961         // then use the StaticAllocaMap.
4962         if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
4963           if (AI->getParent() != DI.getParent()) {
4964             DenseMap<const AllocaInst*, int>::iterator SI =
4965               FuncInfo.StaticAllocaMap.find(AI);
4966             if (SI != FuncInfo.StaticAllocaMap.end()) {
4967               SDV = DAG.getFrameIndexDbgValue(Variable, Expression, SI->second,
4968                                               0, dl, SDNodeOrder);
4969               DAG.AddDbgValue(SDV, nullptr, false);
4970               return nullptr;
4971             }
4972           }
4973         }
4974         DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4975       }
4976     }
4977     return nullptr;
4978   }
4979   case Intrinsic::dbg_value: {
4980     const DbgValueInst &DI = cast<DbgValueInst>(I);
4981     assert(DI.getVariable() && "Missing variable");
4982 
4983     DILocalVariable *Variable = DI.getVariable();
4984     DIExpression *Expression = DI.getExpression();
4985     uint64_t Offset = DI.getOffset();
4986     const Value *V = DI.getValue();
4987     if (!V)
4988       return nullptr;
4989 
4990     SDDbgValue *SDV;
4991     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
4992       SDV = DAG.getConstantDbgValue(Variable, Expression, V, Offset, dl,
4993                                     SDNodeOrder);
4994       DAG.AddDbgValue(SDV, nullptr, false);
4995       return nullptr;
4996     }
4997 
4998     // Do not use getValue() in here; we don't want to generate code at
4999     // this point if it hasn't been done yet.
5000     SDValue N = NodeMap[V];
5001     if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
5002       N = UnusedArgNodeMap[V];
5003     if (N.getNode()) {
5004       if (EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset, false,
5005                                    N))
5006         return nullptr;
5007       SDV = getDbgValue(N, Variable, Expression, Offset, dl, SDNodeOrder);
5008       DAG.AddDbgValue(SDV, N.getNode(), false);
5009       return nullptr;
5010     }
5011 
5012     if (!V->use_empty()) {
5013       // Do not call getValue(V) yet, as we don't want to generate code.
5014       // Remember it for later.
5015       DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
5016       DanglingDebugInfoMap[V] = DDI;
5017       return nullptr;
5018     }
5019 
5020     DEBUG(dbgs() << "Dropping debug location info for:\n  " << DI << "\n");
5021     DEBUG(dbgs() << "  Last seen at:\n    " << *V << "\n");
5022     return nullptr;
5023   }
5024 
5025   case Intrinsic::eh_typeid_for: {
5026     // Find the type id for the given typeinfo.
5027     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5028     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5029     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5030     setValue(&I, Res);
5031     return nullptr;
5032   }
5033 
5034   case Intrinsic::eh_return_i32:
5035   case Intrinsic::eh_return_i64:
5036     DAG.getMachineFunction().setCallsEHReturn(true);
5037     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5038                             MVT::Other,
5039                             getControlRoot(),
5040                             getValue(I.getArgOperand(0)),
5041                             getValue(I.getArgOperand(1))));
5042     return nullptr;
5043   case Intrinsic::eh_unwind_init:
5044     DAG.getMachineFunction().setCallsUnwindInit(true);
5045     return nullptr;
5046   case Intrinsic::eh_dwarf_cfa: {
5047     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
5048                              TLI.getPointerTy(DAG.getDataLayout()),
5049                              getValue(I.getArgOperand(0))));
5050     return nullptr;
5051   }
5052   case Intrinsic::eh_sjlj_callsite: {
5053     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5054     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
5055     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
5056     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
5057 
5058     MMI.setCurrentCallSite(CI->getZExtValue());
5059     return nullptr;
5060   }
5061   case Intrinsic::eh_sjlj_functioncontext: {
5062     // Get and store the index of the function context.
5063     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
5064     AllocaInst *FnCtx =
5065       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
5066     int FI = FuncInfo.StaticAllocaMap[FnCtx];
5067     MFI.setFunctionContextIndex(FI);
5068     return nullptr;
5069   }
5070   case Intrinsic::eh_sjlj_setjmp: {
5071     SDValue Ops[2];
5072     Ops[0] = getRoot();
5073     Ops[1] = getValue(I.getArgOperand(0));
5074     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
5075                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
5076     setValue(&I, Op.getValue(0));
5077     DAG.setRoot(Op.getValue(1));
5078     return nullptr;
5079   }
5080   case Intrinsic::eh_sjlj_longjmp: {
5081     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
5082                             getRoot(), getValue(I.getArgOperand(0))));
5083     return nullptr;
5084   }
5085   case Intrinsic::eh_sjlj_setup_dispatch: {
5086     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
5087                             getRoot()));
5088     return nullptr;
5089   }
5090 
5091   case Intrinsic::masked_gather:
5092     visitMaskedGather(I);
5093     return nullptr;
5094   case Intrinsic::masked_load:
5095     visitMaskedLoad(I);
5096     return nullptr;
5097   case Intrinsic::masked_scatter:
5098     visitMaskedScatter(I);
5099     return nullptr;
5100   case Intrinsic::masked_store:
5101     visitMaskedStore(I);
5102     return nullptr;
5103   case Intrinsic::masked_expandload:
5104     visitMaskedLoad(I, true /* IsExpanding */);
5105     return nullptr;
5106   case Intrinsic::masked_compressstore:
5107     visitMaskedStore(I, true /* IsCompressing */);
5108     return nullptr;
5109   case Intrinsic::x86_mmx_pslli_w:
5110   case Intrinsic::x86_mmx_pslli_d:
5111   case Intrinsic::x86_mmx_pslli_q:
5112   case Intrinsic::x86_mmx_psrli_w:
5113   case Intrinsic::x86_mmx_psrli_d:
5114   case Intrinsic::x86_mmx_psrli_q:
5115   case Intrinsic::x86_mmx_psrai_w:
5116   case Intrinsic::x86_mmx_psrai_d: {
5117     SDValue ShAmt = getValue(I.getArgOperand(1));
5118     if (isa<ConstantSDNode>(ShAmt)) {
5119       visitTargetIntrinsic(I, Intrinsic);
5120       return nullptr;
5121     }
5122     unsigned NewIntrinsic = 0;
5123     EVT ShAmtVT = MVT::v2i32;
5124     switch (Intrinsic) {
5125     case Intrinsic::x86_mmx_pslli_w:
5126       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
5127       break;
5128     case Intrinsic::x86_mmx_pslli_d:
5129       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
5130       break;
5131     case Intrinsic::x86_mmx_pslli_q:
5132       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
5133       break;
5134     case Intrinsic::x86_mmx_psrli_w:
5135       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
5136       break;
5137     case Intrinsic::x86_mmx_psrli_d:
5138       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
5139       break;
5140     case Intrinsic::x86_mmx_psrli_q:
5141       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
5142       break;
5143     case Intrinsic::x86_mmx_psrai_w:
5144       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
5145       break;
5146     case Intrinsic::x86_mmx_psrai_d:
5147       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
5148       break;
5149     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5150     }
5151 
    // The vector shift intrinsics with scalars use 32-bit shift amounts, but
    // the SSE2/MMX shift instructions read 64 bits. Set the upper 32 bits
    // to zero.
    // We must do this early because v2i32 is not a legal type.
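    // Illustrative sketch: @llvm.x86.mmx.pslli.w(%v, %amt) is re-emitted as
    // @llvm.x86.mmx.psll.w(%v, bitcast(build_vector(%amt, 0) : v2i32)), so
    // the variable amount occupies the low 32 bits of a 64-bit operand.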
5156     SDValue ShOps[2];
5157     ShOps[0] = ShAmt;
5158     ShOps[1] = DAG.getConstant(0, sdl, MVT::i32);
    ShAmt = DAG.getBuildVector(ShAmtVT, sdl, ShOps);
5160     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5161     ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5162     Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5163                        DAG.getConstant(NewIntrinsic, sdl, MVT::i32),
5164                        getValue(I.getArgOperand(0)), ShAmt);
5165     setValue(&I, Res);
5166     return nullptr;
5167   }
5168   case Intrinsic::powi:
5169     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
5170                             getValue(I.getArgOperand(1)), DAG));
5171     return nullptr;
5172   case Intrinsic::log:
5173     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5174     return nullptr;
5175   case Intrinsic::log2:
5176     setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5177     return nullptr;
5178   case Intrinsic::log10:
5179     setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5180     return nullptr;
5181   case Intrinsic::exp:
5182     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5183     return nullptr;
5184   case Intrinsic::exp2:
5185     setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5186     return nullptr;
5187   case Intrinsic::pow:
5188     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
5189                            getValue(I.getArgOperand(1)), DAG, TLI));
5190     return nullptr;
5191   case Intrinsic::sqrt:
5192   case Intrinsic::fabs:
5193   case Intrinsic::sin:
5194   case Intrinsic::cos:
5195   case Intrinsic::floor:
5196   case Intrinsic::ceil:
5197   case Intrinsic::trunc:
5198   case Intrinsic::rint:
5199   case Intrinsic::nearbyint:
5200   case Intrinsic::round:
5201   case Intrinsic::canonicalize: {
5202     unsigned Opcode;
5203     switch (Intrinsic) {
5204     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5205     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
5206     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
5207     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
5208     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
5209     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
5210     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
5211     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
5212     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
5213     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
5214     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
5215     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
5216     }
5217 
5218     setValue(&I, DAG.getNode(Opcode, sdl,
5219                              getValue(I.getArgOperand(0)).getValueType(),
5220                              getValue(I.getArgOperand(0))));
5221     return nullptr;
5222   }
5223   case Intrinsic::minnum: {
5224     auto VT = getValue(I.getArgOperand(0)).getValueType();
5225     unsigned Opc =
5226         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT)
5227             ? ISD::FMINNAN
5228             : ISD::FMINNUM;
5229     setValue(&I, DAG.getNode(Opc, sdl, VT,
5230                              getValue(I.getArgOperand(0)),
5231                              getValue(I.getArgOperand(1))));
5232     return nullptr;
5233   }
5234   case Intrinsic::maxnum: {
5235     auto VT = getValue(I.getArgOperand(0)).getValueType();
5236     unsigned Opc =
5237         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT)
5238             ? ISD::FMAXNAN
5239             : ISD::FMAXNUM;
5240     setValue(&I, DAG.getNode(Opc, sdl, VT,
5241                              getValue(I.getArgOperand(0)),
5242                              getValue(I.getArgOperand(1))));
5243     return nullptr;
5244   }
5245   case Intrinsic::copysign:
5246     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
5247                              getValue(I.getArgOperand(0)).getValueType(),
5248                              getValue(I.getArgOperand(0)),
5249                              getValue(I.getArgOperand(1))));
5250     return nullptr;
5251   case Intrinsic::fma:
5252     setValue(&I, DAG.getNode(ISD::FMA, sdl,
5253                              getValue(I.getArgOperand(0)).getValueType(),
5254                              getValue(I.getArgOperand(0)),
5255                              getValue(I.getArgOperand(1)),
5256                              getValue(I.getArgOperand(2))));
5257     return nullptr;
5258   case Intrinsic::experimental_constrained_fadd:
5259   case Intrinsic::experimental_constrained_fsub:
5260   case Intrinsic::experimental_constrained_fmul:
5261   case Intrinsic::experimental_constrained_fdiv:
5262   case Intrinsic::experimental_constrained_frem:
5263     visitConstrainedFPIntrinsic(I, Intrinsic);
5264     return nullptr;
5265   case Intrinsic::fmuladd: {
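    // Illustrative sketch: %r = call float @llvm.fmuladd.f32(float %a,
    // float %b, float %c) becomes a single ISD::FMA node when fusion is
    // allowed and profitable, and an FMUL feeding an FADD otherwise.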
5266     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5267     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
5268         TLI.isFMAFasterThanFMulAndFAdd(VT)) {
5269       setValue(&I, DAG.getNode(ISD::FMA, sdl,
5270                                getValue(I.getArgOperand(0)).getValueType(),
5271                                getValue(I.getArgOperand(0)),
5272                                getValue(I.getArgOperand(1)),
5273                                getValue(I.getArgOperand(2))));
5274     } else {
5275       // TODO: Intrinsic calls should have fast-math-flags.
5276       SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
5277                                 getValue(I.getArgOperand(0)).getValueType(),
5278                                 getValue(I.getArgOperand(0)),
5279                                 getValue(I.getArgOperand(1)));
5280       SDValue Add = DAG.getNode(ISD::FADD, sdl,
5281                                 getValue(I.getArgOperand(0)).getValueType(),
5282                                 Mul,
5283                                 getValue(I.getArgOperand(2)));
5284       setValue(&I, Add);
5285     }
5286     return nullptr;
5287   }
5288   case Intrinsic::convert_to_fp16:
5289     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
5290                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
5291                                          getValue(I.getArgOperand(0)),
5292                                          DAG.getTargetConstant(0, sdl,
5293                                                                MVT::i32))));
5294     return nullptr;
5295   case Intrinsic::convert_from_fp16:
5296     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
5297                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
5298                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
5299                                          getValue(I.getArgOperand(0)))));
5300     return nullptr;
5301   case Intrinsic::pcmarker: {
5302     SDValue Tmp = getValue(I.getArgOperand(0));
5303     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
5304     return nullptr;
5305   }
5306   case Intrinsic::readcyclecounter: {
5307     SDValue Op = getRoot();
5308     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
5309                       DAG.getVTList(MVT::i64, MVT::Other), Op);
5310     setValue(&I, Res);
5311     DAG.setRoot(Res.getValue(1));
5312     return nullptr;
5313   }
5314   case Intrinsic::bitreverse:
5315     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
5316                              getValue(I.getArgOperand(0)).getValueType(),
5317                              getValue(I.getArgOperand(0))));
5318     return nullptr;
5319   case Intrinsic::bswap:
5320     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
5321                              getValue(I.getArgOperand(0)).getValueType(),
5322                              getValue(I.getArgOperand(0))));
5323     return nullptr;
5324   case Intrinsic::cttz: {
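    // The i1 immediate selects the zero-undef variant; as a sketch,
    // @llvm.cttz.i32(i32 %x, i1 true) maps to ISD::CTTZ_ZERO_UNDEF and
    // @llvm.cttz.i32(i32 %x, i1 false) to plain ISD::CTTZ.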
5325     SDValue Arg = getValue(I.getArgOperand(0));
5326     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5327     EVT Ty = Arg.getValueType();
5328     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
5329                              sdl, Ty, Arg));
5330     return nullptr;
5331   }
5332   case Intrinsic::ctlz: {
5333     SDValue Arg = getValue(I.getArgOperand(0));
5334     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5335     EVT Ty = Arg.getValueType();
5336     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
5337                              sdl, Ty, Arg));
5338     return nullptr;
5339   }
5340   case Intrinsic::ctpop: {
5341     SDValue Arg = getValue(I.getArgOperand(0));
5342     EVT Ty = Arg.getValueType();
5343     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
5344     return nullptr;
5345   }
5346   case Intrinsic::stacksave: {
5347     SDValue Op = getRoot();
5348     Res = DAG.getNode(
5349         ISD::STACKSAVE, sdl,
5350         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
5351     setValue(&I, Res);
5352     DAG.setRoot(Res.getValue(1));
5353     return nullptr;
5354   }
5355   case Intrinsic::stackrestore: {
5356     Res = getValue(I.getArgOperand(0));
5357     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
5358     return nullptr;
5359   }
5360   case Intrinsic::get_dynamic_area_offset: {
5361     SDValue Op = getRoot();
5362     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5363     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
    // The result type of @llvm.get.dynamic.area.offset must match the
    // target's pointer type.
5366     if (PtrTy != ResTy)
5367       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
5368                          " intrinsic!");
5369     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
5370                       Op);
5371     DAG.setRoot(Op);
5372     setValue(&I, Res);
5373     return nullptr;
5374   }
5375   case Intrinsic::stackguard: {
5376     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5377     MachineFunction &MF = DAG.getMachineFunction();
5378     const Module &M = *MF.getFunction()->getParent();
5379     SDValue Chain = getRoot();
5380     if (TLI.useLoadStackGuardNode()) {
5381       Res = getLoadStackGuard(DAG, sdl, Chain);
5382     } else {
5383       const Value *Global = TLI.getSDagStackGuard(M);
5384       unsigned Align = DL->getPrefTypeAlignment(Global->getType());
5385       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
5386                         MachinePointerInfo(Global, 0), Align,
5387                         MachineMemOperand::MOVolatile);
5388     }
5389     DAG.setRoot(Chain);
5390     setValue(&I, Res);
5391     return nullptr;
5392   }
5393   case Intrinsic::stackprotector: {
5394     // Emit code into the DAG to store the stack guard onto the stack.
5395     MachineFunction &MF = DAG.getMachineFunction();
5396     MachineFrameInfo &MFI = MF.getFrameInfo();
5397     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5398     SDValue Src, Chain = getRoot();
5399 
5400     if (TLI.useLoadStackGuardNode())
5401       Src = getLoadStackGuard(DAG, sdl, Chain);
5402     else
5403       Src = getValue(I.getArgOperand(0));   // The guard's value.
5404 
5405     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
5406 
5407     int FI = FuncInfo.StaticAllocaMap[Slot];
5408     MFI.setStackProtectorIndex(FI);
5409 
5410     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
5411 
5412     // Store the stack protector onto the stack.
5413     Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
5414                                                  DAG.getMachineFunction(), FI),
5415                        /* Alignment = */ 0, MachineMemOperand::MOVolatile);
5416     setValue(&I, Res);
5417     DAG.setRoot(Res);
5418     return nullptr;
5419   }
5420   case Intrinsic::objectsize: {
5421     // If we don't know by now, we're never going to know.
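    // Illustrative sketch: @llvm.objectsize.i64(i8* %p, i1 false) folds to
    // -1 (unknown maximum size) here; with i1 true it folds to 0 (unknown
    // minimum size).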
5422     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
5423 
5424     assert(CI && "Non-constant type in __builtin_object_size?");
5425 
5426     SDValue Arg = getValue(I.getCalledValue());
5427     EVT Ty = Arg.getValueType();
5428 
5429     if (CI->isZero())
5430       Res = DAG.getConstant(-1ULL, sdl, Ty);
5431     else
5432       Res = DAG.getConstant(0, sdl, Ty);
5433 
5434     setValue(&I, Res);
5435     return nullptr;
5436   }
5437   case Intrinsic::annotation:
5438   case Intrinsic::ptr_annotation:
5439   case Intrinsic::invariant_group_barrier:
5440     // Drop the intrinsic, but forward the value
5441     setValue(&I, getValue(I.getOperand(0)));
5442     return nullptr;
5443   case Intrinsic::assume:
5444   case Intrinsic::var_annotation:
5445     // Discard annotate attributes and assumptions
5446     return nullptr;
5447 
5448   case Intrinsic::init_trampoline: {
5449     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
5450 
5451     SDValue Ops[6];
5452     Ops[0] = getRoot();
5453     Ops[1] = getValue(I.getArgOperand(0));
5454     Ops[2] = getValue(I.getArgOperand(1));
5455     Ops[3] = getValue(I.getArgOperand(2));
5456     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
5457     Ops[5] = DAG.getSrcValue(F);
5458 
5459     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
5460 
5461     DAG.setRoot(Res);
5462     return nullptr;
5463   }
5464   case Intrinsic::adjust_trampoline: {
5465     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
5466                              TLI.getPointerTy(DAG.getDataLayout()),
5467                              getValue(I.getArgOperand(0))));
5468     return nullptr;
5469   }
5470   case Intrinsic::gcroot: {
5471     MachineFunction &MF = DAG.getMachineFunction();
5472     const Function *F = MF.getFunction();
5473     (void)F;
5474     assert(F->hasGC() &&
5475            "only valid in functions with gc specified, enforced by Verifier");
5476     assert(GFI && "implied by previous");
5477     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
5478     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
5479 
5480     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
5481     GFI->addStackRoot(FI->getIndex(), TypeMap);
5482     return nullptr;
5483   }
5484   case Intrinsic::gcread:
5485   case Intrinsic::gcwrite:
5486     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
5487   case Intrinsic::flt_rounds:
5488     setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
5489     return nullptr;
5490 
5491   case Intrinsic::expect: {
5492     // Just replace __builtin_expect(exp, c) with EXP.
5493     setValue(&I, getValue(I.getArgOperand(0)));
5494     return nullptr;
5495   }
5496 
5497   case Intrinsic::debugtrap:
5498   case Intrinsic::trap: {
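    // If a "trap-func-name" attribute is present, e.g. the hypothetical
    //   attributes #0 = { "trap-func-name"="my_trap_impl" }
    // emit a call to that symbol instead of a TRAP/DEBUGTRAP node.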
5499     StringRef TrapFuncName =
5500         I.getAttributes()
5501             .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
5502             .getValueAsString();
5503     if (TrapFuncName.empty()) {
5504       ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
5505         ISD::TRAP : ISD::DEBUGTRAP;
      DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
5507       return nullptr;
5508     }
5509     TargetLowering::ArgListTy Args;
5510 
5511     TargetLowering::CallLoweringInfo CLI(DAG);
5512     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
5513         CallingConv::C, I.getType(),
5514         DAG.getExternalSymbol(TrapFuncName.data(),
5515                               TLI.getPointerTy(DAG.getDataLayout())),
5516         std::move(Args));
5517 
5518     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
5519     DAG.setRoot(Result.second);
5520     return nullptr;
5521   }
5522 
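  // Each of these produces two results; as a sketch,
  //   %res = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  // becomes an ISD::UADDO node with value types (i32, i1).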
5523   case Intrinsic::uadd_with_overflow:
5524   case Intrinsic::sadd_with_overflow:
5525   case Intrinsic::usub_with_overflow:
5526   case Intrinsic::ssub_with_overflow:
5527   case Intrinsic::umul_with_overflow:
5528   case Intrinsic::smul_with_overflow: {
5529     ISD::NodeType Op;
5530     switch (Intrinsic) {
5531     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5532     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
5533     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
5534     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
5535     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
5536     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
5537     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
5538     }
5539     SDValue Op1 = getValue(I.getArgOperand(0));
5540     SDValue Op2 = getValue(I.getArgOperand(1));
5541 
5542     SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
5543     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
5544     return nullptr;
5545   }
5546   case Intrinsic::prefetch: {
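    // Signature sketch: @llvm.prefetch(i8* %addr, i32 %rw, i32 %locality,
    // i32 %cachetype), where an %rw of 0 requests a read hint and 1 a
    // write hint.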
5547     SDValue Ops[5];
5548     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
5549     Ops[0] = getRoot();
5550     Ops[1] = getValue(I.getArgOperand(0));
5551     Ops[2] = getValue(I.getArgOperand(1));
5552     Ops[3] = getValue(I.getArgOperand(2));
5553     Ops[4] = getValue(I.getArgOperand(3));
5554     DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
5555                                         DAG.getVTList(MVT::Other), Ops,
5556                                         EVT::getIntegerVT(*Context, 8),
5557                                         MachinePointerInfo(I.getArgOperand(0)),
5558                                         0, /* align */
5559                                         false, /* volatile */
5560                                         rw==0, /* read */
5561                                         rw==1)); /* write */
5562     return nullptr;
5563   }
5564   case Intrinsic::lifetime_start:
5565   case Intrinsic::lifetime_end: {
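    // Illustrative sketch: call void @llvm.lifetime.start(i64 8, i8* %slot)
    // becomes a LIFETIME_START node on the frame index of each underlying
    // static alloca, which stack coloring can use to overlap stack slots.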
5566     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
    // Stack coloring is not enabled at -O0; discard region information.
5568     if (TM.getOptLevel() == CodeGenOpt::None)
5569       return nullptr;
5570 
5571     SmallVector<Value *, 4> Allocas;
5572     GetUnderlyingObjects(I.getArgOperand(1), Allocas, *DL);
5573 
5574     for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
5575            E = Allocas.end(); Object != E; ++Object) {
5576       AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
5577 
5578       // Could not find an Alloca.
5579       if (!LifetimeObject)
5580         continue;
5581 
5582       // First check that the Alloca is static, otherwise it won't have a
5583       // valid frame index.
5584       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
5585       if (SI == FuncInfo.StaticAllocaMap.end())
5586         return nullptr;
5587 
5588       int FI = SI->second;
5589 
5590       SDValue Ops[2];
5591       Ops[0] = getRoot();
5592       Ops[1] =
5593           DAG.getFrameIndex(FI, TLI.getFrameIndexTy(DAG.getDataLayout()), true);
5594       unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
5595 
5596       Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
5597       DAG.setRoot(Res);
5598     }
5599     return nullptr;
5600   }
5601   case Intrinsic::invariant_start:
5602     // Discard region information.
5603     setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
5604     return nullptr;
5605   case Intrinsic::invariant_end:
5606     // Discard region information.
5607     return nullptr;
5608   case Intrinsic::clear_cache:
5609     return TLI.getClearCacheBuiltinName();
5610   case Intrinsic::donothing:
5611     // ignore
5612     return nullptr;
5613   case Intrinsic::experimental_stackmap: {
5614     visitStackmap(I);
5615     return nullptr;
5616   }
5617   case Intrinsic::experimental_patchpoint_void:
5618   case Intrinsic::experimental_patchpoint_i64: {
5619     visitPatchpoint(&I);
5620     return nullptr;
5621   }
5622   case Intrinsic::experimental_gc_statepoint: {
5623     LowerStatepoint(ImmutableStatepoint(&I));
5624     return nullptr;
5625   }
5626   case Intrinsic::experimental_gc_result: {
5627     visitGCResult(cast<GCResultInst>(I));
5628     return nullptr;
5629   }
5630   case Intrinsic::experimental_gc_relocate: {
5631     visitGCRelocate(cast<GCRelocateInst>(I));
5632     return nullptr;
5633   }
5634   case Intrinsic::instrprof_increment:
5635     llvm_unreachable("instrprof failed to lower an increment");
5636   case Intrinsic::instrprof_value_profile:
5637     llvm_unreachable("instrprof failed to lower a value profiling call");
5638   case Intrinsic::localescape: {
5639     MachineFunction &MF = DAG.getMachineFunction();
5640     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5641 
5642     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
5643     // is the same on all targets.
5644     for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
5645       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
5646       if (isa<ConstantPointerNull>(Arg))
5647         continue; // Skip null pointers. They represent a hole in index space.
5648       AllocaInst *Slot = cast<AllocaInst>(Arg);
5649       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
5650              "can only escape static allocas");
5651       int FI = FuncInfo.StaticAllocaMap[Slot];
5652       MCSymbol *FrameAllocSym =
5653           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
5654               GlobalValue::getRealLinkageName(MF.getName()), Idx);
5655       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
5656               TII->get(TargetOpcode::LOCAL_ESCAPE))
5657           .addSym(FrameAllocSym)
5658           .addFrameIndex(FI);
5659     }
5660 
5661     return nullptr;
5662   }
5663 
5664   case Intrinsic::localrecover: {
5665     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
5666     MachineFunction &MF = DAG.getMachineFunction();
5667     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0);
5668 
5669     // Get the symbol that defines the frame offset.
5670     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
5671     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
5672     unsigned IdxVal = unsigned(Idx->getLimitedValue(INT_MAX));
5673     MCSymbol *FrameAllocSym =
5674         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
5675             GlobalValue::getRealLinkageName(Fn->getName()), IdxVal);
5676 
5677     // Create a MCSymbol for the label to avoid any target lowering
5678     // that would make this PC relative.
5679     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
5680     SDValue OffsetVal =
5681         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
5682 
5683     // Add the offset to the FP.
5684     Value *FP = I.getArgOperand(1);
5685     SDValue FPVal = getValue(FP);
5686     SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal);
5687     setValue(&I, Add);
5688 
5689     return nullptr;
5690   }
5691 
5692   case Intrinsic::eh_exceptionpointer:
5693   case Intrinsic::eh_exceptioncode: {
5694     // Get the exception pointer vreg, copy from it, and resize it to fit.
5695     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
5696     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
5697     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
5698     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
5699     SDValue N =
5700         DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
5701     if (Intrinsic == Intrinsic::eh_exceptioncode)
5702       N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
5703     setValue(&I, N);
5704     return nullptr;
5705   }
5706   case Intrinsic::xray_customevent: {
    // Here we want to make sure that the intrinsic behaves as if it has a
    // specific calling convention. This is currently supported only on
    // x86_64.
5709     // FIXME: Support other platforms later.
5710     const auto &Triple = DAG.getTarget().getTargetTriple();
5711     if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
5712       return nullptr;
5713 
5714     SDLoc DL = getCurSDLoc();
5715     SmallVector<SDValue, 8> Ops;
5716 
5717     // We want to say that we always want the arguments in registers.
5718     SDValue LogEntryVal = getValue(I.getArgOperand(0));
5719     SDValue StrSizeVal = getValue(I.getArgOperand(1));
5720     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
5721     SDValue Chain = getRoot();
5722     Ops.push_back(LogEntryVal);
5723     Ops.push_back(StrSizeVal);
5724     Ops.push_back(Chain);
5725 
    // We need to enforce the calling convention for the callsite so that
    // argument ordering is enforced correctly, and so that register
    // allocation can see that some registers may be assumed clobbered and
    // must be preserved across calls to the intrinsic.
5730     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
5731                                            DL, NodeTys, Ops);
5732     SDValue patchableNode = SDValue(MN, 0);
5733     DAG.setRoot(patchableNode);
5734     setValue(&I, patchableNode);
5735     return nullptr;
5736   }
5737   case Intrinsic::experimental_deoptimize:
5738     LowerDeoptimizeCall(&I);
5739     return nullptr;
5740 
5741   case Intrinsic::experimental_vector_reduce_fadd:
5742   case Intrinsic::experimental_vector_reduce_fmul:
5743   case Intrinsic::experimental_vector_reduce_add:
5744   case Intrinsic::experimental_vector_reduce_mul:
5745   case Intrinsic::experimental_vector_reduce_and:
5746   case Intrinsic::experimental_vector_reduce_or:
5747   case Intrinsic::experimental_vector_reduce_xor:
5748   case Intrinsic::experimental_vector_reduce_smax:
5749   case Intrinsic::experimental_vector_reduce_smin:
5750   case Intrinsic::experimental_vector_reduce_umax:
5751   case Intrinsic::experimental_vector_reduce_umin:
5752   case Intrinsic::experimental_vector_reduce_fmax:
5753   case Intrinsic::experimental_vector_reduce_fmin: {
5754     visitVectorReduce(I, Intrinsic);
5755     return nullptr;
5756   }
5757 
5758   }
5759 }
5760 
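// Lower a constrained FP intrinsic to the corresponding STRICT_* node.
// Illustrative sketch:
//   %r = call double @llvm.experimental.constrained.fadd.f64(
//            double %a, double %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")
// becomes an ISD::STRICT_FADD whose second (chain) result keeps it ordered
// with respect to other nodes that may touch the FP environment.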
5761 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(const CallInst &I,
5762                                                       unsigned Intrinsic) {
5763   SDLoc sdl = getCurSDLoc();
5764   unsigned Opcode;
5765   switch (Intrinsic) {
5766   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5767   case Intrinsic::experimental_constrained_fadd:
5768     Opcode = ISD::STRICT_FADD;
5769     break;
5770   case Intrinsic::experimental_constrained_fsub:
5771     Opcode = ISD::STRICT_FSUB;
5772     break;
5773   case Intrinsic::experimental_constrained_fmul:
5774     Opcode = ISD::STRICT_FMUL;
5775     break;
5776   case Intrinsic::experimental_constrained_fdiv:
5777     Opcode = ISD::STRICT_FDIV;
5778     break;
5779   case Intrinsic::experimental_constrained_frem:
5780     Opcode = ISD::STRICT_FREM;
5781     break;
5782   }
5783   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5784   SDValue Chain = getRoot();
5785   SDValue Ops[3] = { Chain, getValue(I.getArgOperand(0)),
5786                      getValue(I.getArgOperand(1)) };
5787   SmallVector<EVT, 4> ValueVTs;
5788   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
5789   ValueVTs.push_back(MVT::Other); // Out chain
5790 
5791   SDVTList VTs = DAG.getVTList(ValueVTs);
5792   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Ops);
5793 
5794   assert(Result.getNode()->getNumValues() == 2);
5795   SDValue OutChain = Result.getValue(1);
5796   DAG.setRoot(OutChain);
5797   SDValue FPResult = Result.getValue(0);
5798   setValue(&I, FPResult);
5799 }
5800 
5801 std::pair<SDValue, SDValue>
5802 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
5803                                     const BasicBlock *EHPadBB) {
5804   MachineFunction &MF = DAG.getMachineFunction();
5805   MachineModuleInfo &MMI = MF.getMMI();
5806   MCSymbol *BeginLabel = nullptr;
5807 
5808   if (EHPadBB) {
5809     // Insert a label before the invoke call to mark the try range.  This can be
5810     // used to detect deletion of the invoke via the MachineModuleInfo.
5811     BeginLabel = MMI.getContext().createTempSymbol();
5812 
5813     // For SjLj, keep track of which landing pads go with which invokes
5814     // so as to maintain the ordering of pads in the LSDA.
5815     unsigned CallSiteIndex = MMI.getCurrentCallSite();
5816     if (CallSiteIndex) {
5817       MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
5818       LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
5819 
5820       // Now that the call site is handled, stop tracking it.
5821       MMI.setCurrentCallSite(0);
5822     }
5823 
5824     // Both PendingLoads and PendingExports must be flushed here;
5825     // this call might not return.
5826     (void)getRoot();
5827     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
5828 
5829     CLI.setChain(getRoot());
5830   }
5831   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5832   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
5833 
5834   assert((CLI.IsTailCall || Result.second.getNode()) &&
5835          "Non-null chain expected with non-tail call!");
5836   assert((Result.second.getNode() || !Result.first.getNode()) &&
5837          "Null value expected with tail call!");
5838 
5839   if (!Result.second.getNode()) {
5840     // As a special case, a null chain means that a tail call has been emitted
5841     // and the DAG root is already updated.
5842     HasTailCall = true;
5843 
5844     // Since there's no actual continuation from this block, nothing can be
5845     // relying on us setting vregs for them.
5846     PendingExports.clear();
5847   } else {
5848     DAG.setRoot(Result.second);
5849   }
5850 
5851   if (EHPadBB) {
5852     // Insert a label at the end of the invoke call to mark the try range.  This
5853     // can be used to detect deletion of the invoke via the MachineModuleInfo.
5854     MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
5855     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
5856 
5857     // Inform MachineModuleInfo of range.
5858     if (MF.hasEHFunclets()) {
5859       assert(CLI.CS);
5860       WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
5861       EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS->getInstruction()),
5862                                 BeginLabel, EndLabel);
5863     } else {
5864       MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
5865     }
5866   }
5867 
5868   return Result;
5869 }
5870 
5871 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
5872                                       bool isTailCall,
5873                                       const BasicBlock *EHPadBB) {
5874   auto &DL = DAG.getDataLayout();
5875   FunctionType *FTy = CS.getFunctionType();
5876   Type *RetTy = CS.getType();
5877 
5878   TargetLowering::ArgListTy Args;
5879   Args.reserve(CS.arg_size());
5880 
5881   const Value *SwiftErrorVal = nullptr;
5882   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5883 
  // We can't tail call inside a function with a swifterror argument. Lowering
  // does not support this yet; the value would have to be moved into the
  // swifterror register before the call.
5887   auto *Caller = CS.getInstruction()->getParent()->getParent();
5888   if (TLI.supportSwiftError() &&
5889       Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
5890     isTailCall = false;
5891 
5892   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
5893        i != e; ++i) {
5894     TargetLowering::ArgListEntry Entry;
5895     const Value *V = *i;
5896 
5897     // Skip empty types
5898     if (V->getType()->isEmptyTy())
5899       continue;
5900 
5901     SDValue ArgNode = getValue(V);
5902     Entry.Node = ArgNode; Entry.Ty = V->getType();
5903 
5904     Entry.setAttributes(&CS, i - CS.arg_begin());
5905 
5906     // Use swifterror virtual register as input to the call.
5907     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
5908       SwiftErrorVal = V;
      // Find the virtual register for the actual swifterror argument and use
      // it in place of the Value.
5911       Entry.Node =
5912           DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVReg(FuncInfo.MBB, V),
5913                           EVT(TLI.getPointerTy(DL)));
5914     }
5915 
5916     Args.push_back(Entry);
5917 
5918     // If we have an explicit sret argument that is an Instruction, (i.e., it
5919     // might point to function-local memory), we can't meaningfully tail-call.
5920     if (Entry.IsSRet && isa<Instruction>(V))
5921       isTailCall = false;
5922   }
5923 
5924   // Check if target-independent constraints permit a tail call here.
5925   // Target-dependent constraints are checked within TLI->LowerCallTo.
5926   if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
5927     isTailCall = false;
5928 
  // Disable tail calls if there is a swifterror argument. Targets have not
  // been updated to support tail calls.
5931   if (TLI.supportSwiftError() && SwiftErrorVal)
5932     isTailCall = false;
5933 
5934   TargetLowering::CallLoweringInfo CLI(DAG);
5935   CLI.setDebugLoc(getCurSDLoc())
5936       .setChain(getRoot())
5937       .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
5938       .setTailCall(isTailCall)
5939       .setConvergent(CS.isConvergent());
5940   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
5941 
5942   if (Result.first.getNode()) {
5943     const Instruction *Inst = CS.getInstruction();
5944     Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
5945     setValue(Inst, Result.first);
5946   }
5947 
5948   // The last element of CLI.InVals has the SDValue for swifterror return.
5949   // Here we copy it to a virtual register and update SwiftErrorMap for
5950   // book-keeping.
5951   if (SwiftErrorVal && TLI.supportSwiftError()) {
5952     // Get the last element of InVals.
5953     SDValue Src = CLI.InVals.back();
5954     const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
5955     unsigned VReg = FuncInfo.MF->getRegInfo().createVirtualRegister(RC);
5956     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
5957     // We update the virtual register for the actual swifterror argument.
5958     FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, SwiftErrorVal, VReg);
5959     DAG.setRoot(CopyNode);
5960   }
5961 }
5962 
5963 /// Return true if it only matters that the value is equal or not-equal to zero.
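/// As a sketch, a value whose only users look like
///   %c = icmp eq i32 %r, 0
/// (or the icmp ne form) qualifies; any other user makes this return false.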
5964 static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
5965   for (const User *U : V->users()) {
5966     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
5967       if (IC->isEquality())
5968         if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
5969           if (C->isNullValue())
5970             continue;
5971     // Unknown instruction.
5972     return false;
5973   }
5974   return true;
5975 }
5976 
5977 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
5978                              SelectionDAGBuilder &Builder) {
5979 
5980   // Check to see if this load can be trivially constant folded, e.g. if the
5981   // input is from a string literal.
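  // Illustrative sketch: an i32 load through a pointer into a constant
  // global such as @str = constant [5 x i8] c"abcd\00" can be folded by
  // ConstantFoldLoadFromConstPtr into a plain constant.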
5982   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
5983     // Cast pointer to the type we really want to load.
5984     Type *LoadTy =
5985         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
5986     if (LoadVT.isVector())
5987       LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements());
5988 
5989     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
5990                                          PointerType::getUnqual(LoadTy));
5991 
5992     if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
5993             const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
5994       return Builder.getValue(LoadCst);
5995   }
5996 
5997   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
5998   // still constant memory, the input chain can be the entry node.
5999   SDValue Root;
6000   bool ConstantMemory = false;
6001 
6002   // Do not serialize (non-volatile) loads of constant memory with anything.
6003   if (Builder.AA->pointsToConstantMemory(PtrVal)) {
6004     Root = Builder.DAG.getEntryNode();
6005     ConstantMemory = true;
6006   } else {
6007     // Do not serialize non-volatile loads against each other.
6008     Root = Builder.DAG.getRoot();
6009   }
6010 
6011   SDValue Ptr = Builder.getValue(PtrVal);
6012   SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
6013                                         Ptr, MachinePointerInfo(PtrVal),
6014                                         /* Alignment = */ 1);
6015 
6016   if (!ConstantMemory)
6017     Builder.PendingLoads.push_back(LoadVal.getValue(1));
6018   return LoadVal;
6019 }
6020 
6021 /// Record the value for an instruction that produces an integer result,
6022 /// converting the type where necessary.
6023 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
6024                                                   SDValue Value,
6025                                                   bool IsSigned) {
6026   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6027                                                     I.getType(), true);
6028   if (IsSigned)
6029     Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
6030   else
6031     Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
6032   setValue(&I, Value);
6033 }
6034 
6035 /// See if we can lower a memcmp call into an optimized form. If so, return
6036 /// true and lower it. Otherwise return false, and it will be lowered like a
6037 /// normal call.
6038 /// The caller already checked that \p I calls the appropriate LibFunc with a
6039 /// correct prototype.
6040 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
6041   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
6042   const Value *Size = I.getArgOperand(2);
6043   const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
6044   if (CSize && CSize->getZExtValue() == 0) {
6045     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6046                                                           I.getType(), true);
6047     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
6048     return true;
6049   }
6050 
6051   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6052   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
6053       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
6054       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
6055   if (Res.first.getNode()) {
6056     processIntegerCallValue(I, Res.first, true);
6057     PendingLoads.push_back(Res.second);
6058     return true;
6059   }
6060 
6061   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
6062   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
6063   if (!CSize || !IsOnlyUsedInZeroEqualityComparison(&I))
6064     return false;
6065 
6066   // If the target has a fast compare for the given size, it will return a
6067   // preferred load type for that size. Require that the load VT is legal and
6068   // that the target supports unaligned loads of that type. Otherwise, return
6069   // INVALID.
6070   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
6071     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6072     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
6073     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
6074       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
6075       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
6076       // TODO: Check alignment of src and dest ptrs.
6077       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
6078       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
6079       if (!TLI.isTypeLegal(LVT) ||
6080           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
6081           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
6082         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
6083     }
6084 
6085     return LVT;
6086   };
6087 
6088   // This turns into unaligned loads. We only do this if the target natively
6089   // supports the MVT we'll be loading or if it is small enough (<= 4) that
6090   // we'll only produce a small number of byte loads.
6091   MVT LoadVT;
6092   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
6093   switch (NumBitsToCompare) {
6094   default:
6095     return false;
6096   case 16:
6097     LoadVT = MVT::i16;
6098     break;
6099   case 32:
6100     LoadVT = MVT::i32;
6101     break;
6102   case 64:
6103   case 128:
6104   case 256:
6105     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
6106     break;
6107   }
6108 
6109   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
6110     return false;
6111 
6112   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
6113   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
6114 
6115   // Bitcast to a wide integer type if the loads are vectors.
6116   if (LoadVT.isVector()) {
6117     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
6118     LoadL = DAG.getBitcast(CmpVT, LoadL);
6119     LoadR = DAG.getBitcast(CmpVT, LoadR);
6120   }
6121 
6122   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
6123   processIntegerCallValue(I, Cmp, false);
6124   return true;
6125 }
6126 
6127 /// See if we can lower a memchr call into an optimized form. If so, return
6128 /// true and lower it. Otherwise return false, and it will be lowered like a
6129 /// normal call.
6130 /// The caller already checked that \p I calls the appropriate LibFunc with a
6131 /// correct prototype.
6132 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
6133   const Value *Src = I.getArgOperand(0);
6134   const Value *Char = I.getArgOperand(1);
6135   const Value *Length = I.getArgOperand(2);
6136 
6137   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6138   std::pair<SDValue, SDValue> Res =
6139     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
6140                                 getValue(Src), getValue(Char), getValue(Length),
6141                                 MachinePointerInfo(Src));
6142   if (Res.first.getNode()) {
6143     setValue(&I, Res.first);
6144     PendingLoads.push_back(Res.second);
6145     return true;
6146   }
6147 
6148   return false;
6149 }
6150 
6151 /// See if we can lower a mempcpy call into an optimized form. If so, return
6152 /// true and lower it. Otherwise return false, and it will be lowered like a
6153 /// normal call.
6154 /// The caller already checked that \p I calls the appropriate LibFunc with a
6155 /// correct prototype.
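/// As a sketch, mempcpy(d, s, n) is emitted below as a memcpy node plus an
/// ADD computing d + n, which becomes the call's result.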
6156 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
6157   SDValue Dst = getValue(I.getArgOperand(0));
6158   SDValue Src = getValue(I.getArgOperand(1));
6159   SDValue Size = getValue(I.getArgOperand(2));
6160 
6161   unsigned DstAlign = DAG.InferPtrAlignment(Dst);
6162   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
6163   unsigned Align = std::min(DstAlign, SrcAlign);
6164   if (Align == 0) // Alignment of one or both could not be inferred.
6165     Align = 1; // 0 and 1 both specify no alignment, but 0 is reserved.
6166 
6167   bool isVol = false;
6168   SDLoc sdl = getCurSDLoc();
6169 
6170   // In the mempcpy context we need to pass in a false value for isTailCall
6171   // because the return pointer needs to be adjusted by the size of
6172   // the copied memory.
6173   SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Align, isVol,
6174                              false, /*isTailCall=*/false,
6175                              MachinePointerInfo(I.getArgOperand(0)),
6176                              MachinePointerInfo(I.getArgOperand(1)));
6177   assert(MC.getNode() != nullptr &&
6178          "** memcpy should not be lowered as TailCall in mempcpy context **");
6179   DAG.setRoot(MC);
6180 
6181   // Check if Size needs to be truncated or extended.
6182   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
6183 
6184   // Adjust return pointer to point just past the last dst byte.
6185   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
6186                                     Dst, Size);
6187   setValue(&I, DstPlusSize);
6188   return true;
6189 }
6190 
6191 /// See if we can lower a strcpy call into an optimized form.  If so, return
6192 /// true and lower it, otherwise return false and it will be lowered like a
6193 /// normal call.
6194 /// The caller already checked that \p I calls the appropriate LibFunc with a
6195 /// correct prototype.
6196 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
6197   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6198 
6199   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6200   std::pair<SDValue, SDValue> Res =
6201     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
6202                                 getValue(Arg0), getValue(Arg1),
6203                                 MachinePointerInfo(Arg0),
6204                                 MachinePointerInfo(Arg1), isStpcpy);
6205   if (Res.first.getNode()) {
6206     setValue(&I, Res.first);
6207     DAG.setRoot(Res.second);
6208     return true;
6209   }
6210 
6211   return false;
6212 }
6213 
6214 /// See if we can lower a strcmp call into an optimized form.  If so, return
6215 /// true and lower it, otherwise return false and it will be lowered like a
6216 /// normal call.
6217 /// The caller already checked that \p I calls the appropriate LibFunc with a
6218 /// correct prototype.
6219 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
6220   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6221 
6222   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6223   std::pair<SDValue, SDValue> Res =
6224     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
6225                                 getValue(Arg0), getValue(Arg1),
6226                                 MachinePointerInfo(Arg0),
6227                                 MachinePointerInfo(Arg1));
6228   if (Res.first.getNode()) {
6229     processIntegerCallValue(I, Res.first, true);
6230     PendingLoads.push_back(Res.second);
6231     return true;
6232   }
6233 
6234   return false;
6235 }
6236 
6237 /// See if we can lower a strlen call into an optimized form.  If so, return
6238 /// true and lower it, otherwise return false and it will be lowered like a
6239 /// normal call.
6240 /// The caller already checked that \p I calls the appropriate LibFunc with a
6241 /// correct prototype.
6242 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
6243   const Value *Arg0 = I.getArgOperand(0);
6244 
6245   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6246   std::pair<SDValue, SDValue> Res =
6247     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
6248                                 getValue(Arg0), MachinePointerInfo(Arg0));
6249   if (Res.first.getNode()) {
6250     processIntegerCallValue(I, Res.first, false);
6251     PendingLoads.push_back(Res.second);
6252     return true;
6253   }
6254 
6255   return false;
6256 }
6257 
6258 /// See if we can lower a strnlen call into an optimized form.  If so, return
6259 /// true and lower it, otherwise return false and it will be lowered like a
6260 /// normal call.
6261 /// The caller already checked that \p I calls the appropriate LibFunc with a
6262 /// correct prototype.
6263 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
6264   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6265 
6266   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6267   std::pair<SDValue, SDValue> Res =
6268     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
6269                                  getValue(Arg0), getValue(Arg1),
6270                                  MachinePointerInfo(Arg0));
6271   if (Res.first.getNode()) {
6272     processIntegerCallValue(I, Res.first, false);
6273     PendingLoads.push_back(Res.second);
6274     return true;
6275   }
6276 
6277   return false;
6278 }
6279 
6280 /// See if we can lower a unary floating-point operation into an SDNode with
6281 /// the specified Opcode.  If so, return true and lower it, otherwise return
6282 /// false and it will be lowered like a normal call.
6283 /// The caller already checked that \p I calls the appropriate LibFunc with a
6284 /// correct prototype.
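/// As a sketch, a sinf call that only reads memory is emitted directly as an
/// ISD::FSIN node on the operand's value type instead of a normal call.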
6285 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
6286                                               unsigned Opcode) {
6287   // We already checked this call's prototype; verify it doesn't modify errno.
6288   if (!I.onlyReadsMemory())
6289     return false;
6290 
6291   SDValue Tmp = getValue(I.getArgOperand(0));
6292   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
6293   return true;
6294 }
6295 
6296 /// See if we can lower a binary floating-point operation into an SDNode with
6297 /// the specified Opcode. If so, return true and lower it. Otherwise return
6298 /// false, and it will be lowered like a normal call.
6299 /// The caller already checked that \p I calls the appropriate LibFunc with a
6300 /// correct prototype.
6301 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
6302                                                unsigned Opcode) {
6303   // We already checked this call's prototype; verify it doesn't modify errno.
6304   if (!I.onlyReadsMemory())
6305     return false;
6306 
6307   SDValue Tmp0 = getValue(I.getArgOperand(0));
6308   SDValue Tmp1 = getValue(I.getArgOperand(1));
6309   EVT VT = Tmp0.getValueType();
6310   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
6311   return true;
6312 }
6313 
6314 void SelectionDAGBuilder::visitCall(const CallInst &I) {
6315   // Handle inline assembly differently.
6316   if (isa<InlineAsm>(I.getCalledValue())) {
6317     visitInlineAsm(&I);
6318     return;
6319   }
6320 
6321   MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6322   computeUsesVAFloatArgument(I, MMI);
6323 
6324   const char *RenameFn = nullptr;
6325   if (Function *F = I.getCalledFunction()) {
6326     if (F->isDeclaration()) {
6327       if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
6328         if (unsigned IID = II->getIntrinsicID(F)) {
6329           RenameFn = visitIntrinsicCall(I, IID);
6330           if (!RenameFn)
6331             return;
6332         }
6333       }
6334       if (Intrinsic::ID IID = F->getIntrinsicID()) {
6335         RenameFn = visitIntrinsicCall(I, IID);
6336         if (!RenameFn)
6337           return;
6338       }
6339     }
6340 
    // Check for well-known libc/libm calls.  If the function is internal, it
    // can't be a library call.  Skip the check if the call is marked
    // nobuiltin.
6344     LibFunc Func;
6345     if (!I.isNoBuiltin() && !F->hasLocalLinkage() && F->hasName() &&
6346         LibInfo->getLibFunc(*F, Func) &&
6347         LibInfo->hasOptimizedCodeGen(Func)) {
6348       switch (Func) {
6349       default: break;
6350       case LibFunc_copysign:
6351       case LibFunc_copysignf:
6352       case LibFunc_copysignl:
6353         // We already checked this call's prototype; verify it doesn't modify
6354         // errno.
6355         if (I.onlyReadsMemory()) {
6356           SDValue LHS = getValue(I.getArgOperand(0));
6357           SDValue RHS = getValue(I.getArgOperand(1));
6358           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
6359                                    LHS.getValueType(), LHS, RHS));
6360           return;
6361         }
6362         break;
6363       case LibFunc_fabs:
6364       case LibFunc_fabsf:
6365       case LibFunc_fabsl:
6366         if (visitUnaryFloatCall(I, ISD::FABS))
6367           return;
6368         break;
6369       case LibFunc_fmin:
6370       case LibFunc_fminf:
6371       case LibFunc_fminl:
6372         if (visitBinaryFloatCall(I, ISD::FMINNUM))
6373           return;
6374         break;
6375       case LibFunc_fmax:
6376       case LibFunc_fmaxf:
6377       case LibFunc_fmaxl:
6378         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
6379           return;
6380         break;
6381       case LibFunc_sin:
6382       case LibFunc_sinf:
6383       case LibFunc_sinl:
6384         if (visitUnaryFloatCall(I, ISD::FSIN))
6385           return;
6386         break;
6387       case LibFunc_cos:
6388       case LibFunc_cosf:
6389       case LibFunc_cosl:
6390         if (visitUnaryFloatCall(I, ISD::FCOS))
6391           return;
6392         break;
6393       case LibFunc_sqrt:
6394       case LibFunc_sqrtf:
6395       case LibFunc_sqrtl:
6396       case LibFunc_sqrt_finite:
6397       case LibFunc_sqrtf_finite:
6398       case LibFunc_sqrtl_finite:
6399         if (visitUnaryFloatCall(I, ISD::FSQRT))
6400           return;
6401         break;
6402       case LibFunc_floor:
6403       case LibFunc_floorf:
6404       case LibFunc_floorl:
6405         if (visitUnaryFloatCall(I, ISD::FFLOOR))
6406           return;
6407         break;
6408       case LibFunc_nearbyint:
6409       case LibFunc_nearbyintf:
6410       case LibFunc_nearbyintl:
6411         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
6412           return;
6413         break;
6414       case LibFunc_ceil:
6415       case LibFunc_ceilf:
6416       case LibFunc_ceill:
6417         if (visitUnaryFloatCall(I, ISD::FCEIL))
6418           return;
6419         break;
6420       case LibFunc_rint:
6421       case LibFunc_rintf:
6422       case LibFunc_rintl:
6423         if (visitUnaryFloatCall(I, ISD::FRINT))
6424           return;
6425         break;
6426       case LibFunc_round:
6427       case LibFunc_roundf:
6428       case LibFunc_roundl:
6429         if (visitUnaryFloatCall(I, ISD::FROUND))
6430           return;
6431         break;
6432       case LibFunc_trunc:
6433       case LibFunc_truncf:
6434       case LibFunc_truncl:
6435         if (visitUnaryFloatCall(I, ISD::FTRUNC))
6436           return;
6437         break;
6438       case LibFunc_log2:
6439       case LibFunc_log2f:
6440       case LibFunc_log2l:
6441         if (visitUnaryFloatCall(I, ISD::FLOG2))
6442           return;
6443         break;
6444       case LibFunc_exp2:
6445       case LibFunc_exp2f:
6446       case LibFunc_exp2l:
6447         if (visitUnaryFloatCall(I, ISD::FEXP2))
6448           return;
6449         break;
6450       case LibFunc_memcmp:
6451         if (visitMemCmpCall(I))
6452           return;
6453         break;
6454       case LibFunc_mempcpy:
6455         if (visitMemPCpyCall(I))
6456           return;
6457         break;
6458       case LibFunc_memchr:
6459         if (visitMemChrCall(I))
6460           return;
6461         break;
6462       case LibFunc_strcpy:
6463         if (visitStrCpyCall(I, false))
6464           return;
6465         break;
6466       case LibFunc_stpcpy:
6467         if (visitStrCpyCall(I, true))
6468           return;
6469         break;
6470       case LibFunc_strcmp:
6471         if (visitStrCmpCall(I))
6472           return;
6473         break;
6474       case LibFunc_strlen:
6475         if (visitStrLenCall(I))
6476           return;
6477         break;
6478       case LibFunc_strnlen:
6479         if (visitStrNLenCall(I))
6480           return;
6481         break;
6482       }
6483     }
6484   }
6485 
6486   SDValue Callee;
6487   if (!RenameFn)
6488     Callee = getValue(I.getCalledValue());
6489   else
6490     Callee = DAG.getExternalSymbol(
6491         RenameFn,
6492         DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
6493 
6494   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
6495   // have to do anything here to lower funclet bundles.
6496   assert(!I.hasOperandBundlesOtherThan(
6497              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
6498          "Cannot lower calls with arbitrary operand bundles!");
6499 
6500   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
6501     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
6502   else
6503     // Check if we can potentially perform a tail call. More detailed checking
6504     // is done within LowerCallTo, after more information about the call is
6505     // known.
6506     LowerCallTo(&I, Callee, I.isTailCall());
6507 }
6508 
6509 namespace {
6510 
6511 /// AsmOperandInfo - This contains information for each constraint that we are
6512 /// lowering.
6513 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
6514 public:
6515   /// CallOperand - If this is the result output operand or a clobber
6516   /// this is null, otherwise it is the incoming operand to the CallInst.
6517   /// This gets modified as the asm is processed.
6518   SDValue CallOperand;
6519 
6520   /// AssignedRegs - If this is a register or register class operand, this
6521   /// contains the set of registers corresponding to the operand.
6522   RegsForValue AssignedRegs;
6523 
6524   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
6525     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr,0) {
6526   }
6527 
6528   /// Whether or not this operand accesses memory.
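  /// (e.g. an indirect operand, or any constraint alternative such as 'm' or
  /// 'o' that maps to TargetLowering::C_Memory).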
6529   bool hasMemory(const TargetLowering &TLI) const {
6530     // Indirect operands access memory.
6531     if (isIndirect)
6532       return true;
6533 
6534     for (const auto &Code : Codes)
6535       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
6536         return true;
6537 
6538     return false;
6539   }
6540 
6541   /// getCallOperandValEVT - Return the EVT of the Value* that this operand
6542   /// corresponds to.  If there is no Value* for this operand, it returns
6543   /// MVT::Other.
6544   EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
6545                            const DataLayout &DL) const {
6546     if (!CallOperandVal) return MVT::Other;
6547 
6548     if (isa<BasicBlock>(CallOperandVal))
6549       return TLI.getPointerTy(DL);
6550 
6551     llvm::Type *OpTy = CallOperandVal->getType();
6552 
6553     // FIXME: code duplicated from TargetLowering::ParseConstraints().
6554     // If this is an indirect operand, the operand is a pointer to the
6555     // accessed type.
6556     if (isIndirect) {
6557       llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
6558       if (!PtrTy)
6559         report_fatal_error("Indirect operand for inline asm not a pointer!");
6560       OpTy = PtrTy->getElementType();
6561     }
6562 
6563     // Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
6564     if (StructType *STy = dyn_cast<StructType>(OpTy))
6565       if (STy->getNumElements() == 1)
6566         OpTy = STy->getElementType(0);
6567 
6568     // If OpTy is not a single value, it may be a struct/union that we
6569     // can tile with integers.
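    // (e.g. a 64-bit struct { i32, i32 } matched by an "r" constraint is
    // treated as a single i64 below).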
6570     if (!OpTy->isSingleValueType() && OpTy->isSized()) {
6571       unsigned BitSize = DL.getTypeSizeInBits(OpTy);
6572       switch (BitSize) {
6573       default: break;
6574       case 1:
6575       case 8:
6576       case 16:
6577       case 32:
6578       case 64:
6579       case 128:
6580         OpTy = IntegerType::get(Context, BitSize);
6581         break;
6582       }
6583     }
6584 
6585     return TLI.getValueType(DL, OpTy, true);
6586   }
6587 };
6588 
6589 typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
6590 
6591 } // end anonymous namespace
6592 
6593 /// Make sure that the output operand \p OpInfo and its corresponding input
6594 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
6595 /// out).
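/// For example, with the constraint string "=r,0" input operand 1 is tied to
/// output operand 0 and must be representable in the same register class.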
6596 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
6597                                SDISelAsmOperandInfo &MatchingOpInfo,
6598                                SelectionDAG &DAG) {
6599   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
6600     return;
6601 
6602   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
6603   const auto &TLI = DAG.getTargetLoweringInfo();
6604 
6605   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
6606       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
6607                                        OpInfo.ConstraintVT);
6608   std::pair<unsigned, const TargetRegisterClass *> InputRC =
6609       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
6610                                        MatchingOpInfo.ConstraintVT);
6611   if ((OpInfo.ConstraintVT.isInteger() !=
6612        MatchingOpInfo.ConstraintVT.isInteger()) ||
6613       (MatchRC.second != InputRC.second)) {
6614     // FIXME: error out in a more elegant fashion
6615     report_fatal_error("Unsupported asm: input constraint"
6616                        " with a matching output constraint of"
6617                        " incompatible type!");
6618   }
6619   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
6620 }
6621 
6622 /// Get a direct memory input to behave well as an indirect operand.
6623 /// This may introduce stores, hence the need for a \p Chain.
6624 /// \return The (possibly updated) chain.
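/// Conceptually, for a non-constant operand %v this produces:
///   slot   = new stack object
///   chain' = store chain, %v, slot
/// and rewrites OpInfo.CallOperand to be the address of the slot.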
6625 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
6626                                         SDISelAsmOperandInfo &OpInfo,
6627                                         SelectionDAG &DAG) {
6628   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6629 
6630   // If we don't have an indirect input, put it in the constant pool if we can,
6631   // otherwise spill it to a stack slot.
6632   // TODO: This isn't quite right. We need to handle these according to
6633   // the addressing mode that the constraint wants. Also, this may take
6634   // an additional register for the computation and we don't want that
6635   // either.
6636 
6637   // If the operand is a float, integer, or vector constant, spill to a
6638   // constant pool entry to get its address.
6639   const Value *OpVal = OpInfo.CallOperandVal;
6640   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
6641       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
6642     OpInfo.CallOperand = DAG.getConstantPool(
6643         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
6644     return Chain;
6645   }
6646 
6647   // Otherwise, create a stack slot and emit a store to it before the asm.
6648   Type *Ty = OpVal->getType();
6649   auto &DL = DAG.getDataLayout();
6650   uint64_t TySize = DL.getTypeAllocSize(Ty);
6651   unsigned Align = DL.getPrefTypeAlignment(Ty);
6652   MachineFunction &MF = DAG.getMachineFunction();
6653   int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
6654   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
6655   Chain = DAG.getStore(Chain, Location, OpInfo.CallOperand, StackSlot,
6656                        MachinePointerInfo::getFixedStack(MF, SSFI));
6657   OpInfo.CallOperand = StackSlot;
6658 
6659   return Chain;
6660 }
6661 
6662 /// GetRegistersForValue - Assign registers (virtual or physical) for the
6663 /// specified operand.  We prefer to assign virtual registers, to allow the
6664 /// register allocator to handle the assignment process.  However, if the asm
6665 /// uses features that we can't model on machineinstrs, we have SDISel do the
6666 /// allocation.  This produces generally horrible, but correct, code.
6667 ///
6668 ///   OpInfo describes the operand.
6669 ///
6670 static void GetRegistersForValue(SelectionDAG &DAG, const TargetLowering &TLI,
6671                                  const SDLoc &DL,
6672                                  SDISelAsmOperandInfo &OpInfo) {
6673   LLVMContext &Context = *DAG.getContext();
6674 
6675   MachineFunction &MF = DAG.getMachineFunction();
6676   SmallVector<unsigned, 4> Regs;
6677   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6678 
6679   // If this is a constraint for a single physreg, or a constraint for a
6680   // register class, find it.
6681   std::pair<unsigned, const TargetRegisterClass *> PhysReg =
6682       TLI.getRegForInlineAsmConstraint(&TRI, OpInfo.ConstraintCode,
6683                                        OpInfo.ConstraintVT);
6684 
6685   unsigned NumRegs = 1;
6686   if (OpInfo.ConstraintVT != MVT::Other) {
6687     // If this is an FP input in an integer register (or vice versa) insert a bit
6688     // cast of the input value.  More generally, handle any case where the input
6689     // value disagrees with the register class we plan to stick this in.
6690     if (OpInfo.Type == InlineAsm::isInput && PhysReg.second &&
6691         !TRI.isTypeLegalForClass(*PhysReg.second, OpInfo.ConstraintVT)) {
6692       // Try to convert to the first EVT that the reg class contains.  If the
6693       // types are identical size, use a bitcast to convert (e.g. two differing
6694       // vector types).
6695       MVT RegVT = *TRI.legalclasstypes_begin(*PhysReg.second);
6696       if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) {
6697         OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6698                                          RegVT, OpInfo.CallOperand);
6699         OpInfo.ConstraintVT = RegVT;
6700       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
6701         // If the input is a FP value and we want it in FP registers, do a
6702         // bitcast to the corresponding integer type.  This turns an f64 value
6703         // into i64, which can be passed with two i32 values on a 32-bit
6704         // machine.
6705         RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
6706         OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6707                                          RegVT, OpInfo.CallOperand);
6708         OpInfo.ConstraintVT = RegVT;
6709       }
6710     }
6711 
6712     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
6713   }
6714 
6715   MVT RegVT;
6716   EVT ValueVT = OpInfo.ConstraintVT;
6717 
6718   // If this is a constraint for a specific physical register, like {r17},
6719   // assign it now.
6720   if (unsigned AssignedReg = PhysReg.first) {
6721     const TargetRegisterClass *RC = PhysReg.second;
6722     if (OpInfo.ConstraintVT == MVT::Other)
6723       ValueVT = *TRI.legalclasstypes_begin(*RC);
6724 
6725     // Get the actual register value type.  This is important, because the user
6726     // may have asked for (e.g.) the AX register in i32 type.  We need to
6727     // remember that AX is actually i16 to get the right extension.
6728     RegVT = *TRI.legalclasstypes_begin(*RC);
6729 
6730     // This is an explicit reference to a physical register.
6731     Regs.push_back(AssignedReg);
6732 
6733     // If this is an expanded reference, add the rest of the regs to Regs.
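    // (e.g. an i64 value constrained to the 32-bit register {r0} also needs
    // the next register in the class's order, typically r1).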
6734     if (NumRegs != 1) {
6735       TargetRegisterClass::iterator I = RC->begin();
6736       for (; *I != AssignedReg; ++I)
6737         assert(I != RC->end() && "Didn't find reg!");
6738 
6739       // Already added the first reg.
6740       --NumRegs; ++I;
6741       for (; NumRegs; --NumRegs, ++I) {
6742         assert(I != RC->end() && "Ran out of registers to allocate!");
6743         Regs.push_back(*I);
6744       }
6745     }
6746 
6747     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
6748     return;
6749   }
6750 
6751   // Otherwise, if this was a reference to an LLVM register class, create vregs
6752   // for this reference.
6753   if (const TargetRegisterClass *RC = PhysReg.second) {
6754     RegVT = *TRI.legalclasstypes_begin(*RC);
6755     if (OpInfo.ConstraintVT == MVT::Other)
6756       ValueVT = RegVT;
6757 
6758     // Create the appropriate number of virtual registers.
6759     MachineRegisterInfo &RegInfo = MF.getRegInfo();
6760     for (; NumRegs; --NumRegs)
6761       Regs.push_back(RegInfo.createVirtualRegister(RC));
6762 
6763     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
6764     return;
6765   }
6766 
6767   // Otherwise, we couldn't allocate enough registers for this.
6768 }
6769 
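// Operands of an ISD::INLINEASM node are laid out as: 0: input chain,
// 1: asm string, 2: !srcloc MDNode, 3: extra info bits, followed by one group
// per constraint: a flag-word constant (kind in bits 0-2, operand count in
// bits 3-15, and for Kind_Mem the memory constraint ID starting at bit 16)
// and then that many operands.  findMatchingInlineAsmOperand walks these
// groups to locate the definition previously emitted for operand OperandNo.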
6770 static unsigned
6771 findMatchingInlineAsmOperand(unsigned OperandNo,
6772                              const std::vector<SDValue> &AsmNodeOperands) {
6773   // Scan until we find the definition we already emitted of this operand.
6774   unsigned CurOp = InlineAsm::Op_FirstOperand;
6775   for (; OperandNo; --OperandNo) {
6776     // Advance to the next operand.
6777     unsigned OpFlag =
6778         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6779     assert((InlineAsm::isRegDefKind(OpFlag) ||
6780             InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
6781             InlineAsm::isMemKind(OpFlag)) &&
6782            "Skipped past definitions?");
6783     CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
6784   }
6785   return CurOp;
6786 }
6787 
6788 /// Fill \p Regs with \p NumRegs new virtual registers of type \p RegVT
6789 /// \return true on success, false otherwise.
6790 static bool createVirtualRegs(SmallVector<unsigned, 4> &Regs, unsigned NumRegs,
6791                               MVT RegVT, SelectionDAG &DAG) {
6792   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6793   MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
6794   for (unsigned i = 0, e = NumRegs; i != e; ++i) {
6795     if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT))
6796       Regs.push_back(RegInfo.createVirtualRegister(RC));
6797     else
6798       return false;
6799   }
6800   return true;
6801 }
6802 
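/// ExtraFlags - Accumulates the "extra info" bits (HasSideEffects,
/// IsAlignStack, AsmDialect, MayLoad/MayStore) that are later emitted as
/// operand 3 of the INLINEASM node.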
6803 class ExtraFlags {
6804   unsigned Flags = 0;
6805 
6806 public:
6807   explicit ExtraFlags(ImmutableCallSite CS) {
6808     const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
6809     if (IA->hasSideEffects())
6810       Flags |= InlineAsm::Extra_HasSideEffects;
6811     if (IA->isAlignStack())
6812       Flags |= InlineAsm::Extra_IsAlignStack;
6813     if (CS.isConvergent())
6814       Flags |= InlineAsm::Extra_IsConvergent;
6815     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
6816   }
6817 
6818   void update(const llvm::TargetLowering::AsmOperandInfo &OpInfo) {
6819     // Ideally, we would only check against memory constraints.  However, the
6820     // meaning of an Other constraint can be target-specific and we can't easily
6821     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
6822     // for Other constraints as well.
6823     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
6824         OpInfo.ConstraintType == TargetLowering::C_Other) {
6825       if (OpInfo.Type == InlineAsm::isInput)
6826         Flags |= InlineAsm::Extra_MayLoad;
6827       else if (OpInfo.Type == InlineAsm::isOutput)
6828         Flags |= InlineAsm::Extra_MayStore;
6829       else if (OpInfo.Type == InlineAsm::isClobber)
6830         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
6831     }
6832   }
6833 
6834   unsigned get() const { return Flags; }
6835 };
6836 
6837 /// visitInlineAsm - Handle a call to an InlineAsm object.
6838 ///
6839 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
6840   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
6841 
6842   // ConstraintOperands - Information about all of the constraints.
6843   SDISelAsmOperandInfoVector ConstraintOperands;
6844 
6845   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6846   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
6847       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);
6848 
6849   bool hasMemory = false;
6850 
6851   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore bits.
6852   ExtraFlags ExtraInfo(CS);
6853 
6854   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
6855   unsigned ResNo = 0;   // ResNo - The result number of the next output.
6856   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
6857     ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
6858     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
6859 
6860     MVT OpVT = MVT::Other;
6861 
6862     // Compute the value type for each operand.
6863     if (OpInfo.Type == InlineAsm::isInput ||
6864         (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
6865       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
6866 
6867       // Process the call argument. BasicBlocks are labels, currently appearing
6868       // only in asms.
6869       if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
6870         OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
6871       } else {
6872         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
6873       }
6874 
6875       OpVT =
6876           OpInfo
6877               .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
6878               .getSimpleVT();
6879     }
6880 
6881     if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
6882       // The return value of the call is this value.  As such, there is no
6883       // corresponding argument.
6884       assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
6885       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
6886         OpVT = TLI.getSimpleValueType(DAG.getDataLayout(),
6887                                       STy->getElementType(ResNo));
6888       } else {
6889         assert(ResNo == 0 && "Asm only has one result!");
6890         OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
6891       }
6892       ++ResNo;
6893     }
6894 
6895     OpInfo.ConstraintVT = OpVT;
6896 
6897     if (!hasMemory)
6898       hasMemory = OpInfo.hasMemory(TLI);
6899 
6900     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
6901     // FIXME: Could we compute this on OpInfo rather than TargetConstraints[i]?
6902     auto TargetConstraint = TargetConstraints[i];
6903 
6904     // Compute the constraint code and ConstraintType to use.
6905     TLI.ComputeConstraintToUse(TargetConstraint, SDValue());
6906 
6907     ExtraInfo.update(TargetConstraint);
6908   }
6909 
6910   SDValue Chain, Flag;
6911 
6912   // We won't need to flush pending loads if this asm doesn't touch
6913   // memory and is nonvolatile.
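  // (getRoot() folds the PendingLoads into the returned chain; DAG.getRoot()
  // leaves them pending so independent loads can be scheduled past the asm.)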
6914   if (hasMemory || IA->hasSideEffects())
6915     Chain = getRoot();
6916   else
6917     Chain = DAG.getRoot();
6918 
6919   // Second pass over the constraints: compute which constraint option to use
6920   // and assign registers to constraints that want a specific physreg.
6921   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6922     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6923 
6924     // If this is an output operand with a matching input operand, look up the
6925     // matching input. If their types mismatch, e.g. one is an integer, the
6926     // other is floating point, or their sizes are different, flag it as an
6927     // error.
6928     if (OpInfo.hasMatchingInput()) {
6929       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
6930       patchMatchingInput(OpInfo, Input, DAG);
6931     }
6932 
6933     // Compute the constraint code and ConstraintType to use.
6934     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
6935 
6936     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6937         OpInfo.Type == InlineAsm::isClobber)
6938       continue;
6939 
6940     // If this is a memory input, and if the operand is not indirect, do what we
6941     // need to provide an address for the memory input.
6942     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6943         !OpInfo.isIndirect) {
6944       assert((OpInfo.isMultipleAlternative ||
6945               (OpInfo.Type == InlineAsm::isInput)) &&
6946              "Can only indirectify direct input operands!");
6947 
6948       // Memory operands really want the address of the value.
6949       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
6950 
6951       // There is no longer a Value* corresponding to this operand.
6952       OpInfo.CallOperandVal = nullptr;
6953 
6954       // It is now an indirect operand.
6955       OpInfo.isIndirect = true;
6956     }
6957 
6958     // If this constraint is for a specific register, allocate it before
6959     // anything else.
6960     if (OpInfo.ConstraintType == TargetLowering::C_Register)
6961       GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
6962   }
6963 
6964   // Third pass - Loop over all of the operands, assigning virtual or physregs
6965   // to register class operands.
6966   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6967     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6968 
6969     // C_Register operands have already been allocated, Other/Memory don't need
6970     // to be.
6971     if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
6972       GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
6973   }
6974 
6975   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
6976   std::vector<SDValue> AsmNodeOperands;
6977   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
6978   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
6979       IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));
6980 
6981   // If we have a !srcloc metadata node associated with it, we want to attach
6982   // this to the ultimately generated inline asm machineinstr.  To do this, we
6983   // pass in the third operand as this (potentially null) inline asm MDNode.
6984   const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
6985   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
6986 
6987   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
6988   // bits as operand 3.
6989   AsmNodeOperands.push_back(DAG.getTargetConstant(
6990       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
6991 
6992   // Loop over all of the inputs, copying the operand values into the
6993   // appropriate registers and processing the output regs.
6994   RegsForValue RetValRegs;
6995 
6996   // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
6997   std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
6998 
6999   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
7000     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
7001 
7002     switch (OpInfo.Type) {
7003     case InlineAsm::isOutput: {
7004       if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
7005           OpInfo.ConstraintType != TargetLowering::C_Register) {
7006         // Memory output, or 'other' output (e.g. 'X' constraint).
7007         assert(OpInfo.isIndirect && "Memory output must be indirect operand");
7008 
7009         unsigned ConstraintID =
7010             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
7011         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
7012                "Failed to convert memory constraint code to constraint id.");
7013 
7014         // Add information to the INLINEASM node to know about this output.
7015         unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
7016         OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
7017         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
7018                                                         MVT::i32));
7019         AsmNodeOperands.push_back(OpInfo.CallOperand);
7020         break;
7021       }
7022 
7023       // Otherwise, this is a register or register class output.
7024 
7025       // Copy the output from the appropriate register.  Find a register that
7026       // we can use.
7027       if (OpInfo.AssignedRegs.Regs.empty()) {
7028         emitInlineAsmError(
7029             CS, "couldn't allocate output register for constraint '" +
7030                     Twine(OpInfo.ConstraintCode) + "'");
7031         return;
7032       }
7033 
7034       // If this is an indirect operand, store through the pointer after the
7035       // asm.
7036       if (OpInfo.isIndirect) {
7037         IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
7038                                                       OpInfo.CallOperandVal));
7039       } else {
7040         // This is the result value of the call.
7041         assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7042         // Concatenate this output onto the outputs list.
7043         RetValRegs.append(OpInfo.AssignedRegs);
7044       }
7045 
7046       // Add information to the INLINEASM node to know that this register is
7047       // set.
7048       OpInfo.AssignedRegs
7049           .AddInlineAsmOperands(OpInfo.isEarlyClobber
7050                                     ? InlineAsm::Kind_RegDefEarlyClobber
7051                                     : InlineAsm::Kind_RegDef,
7052                                 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
7053       break;
7054     }
7055     case InlineAsm::isInput: {
7056       SDValue InOperandVal = OpInfo.CallOperand;
7057 
7058       if (OpInfo.isMatchingInputConstraint()) {
7059         // If this is required to match an output register we have already set,
7060         // just use its register.
7061         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
7062                                                   AsmNodeOperands);
7063         unsigned OpFlag =
7064           cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7065         if (InlineAsm::isRegDefKind(OpFlag) ||
7066             InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
7067           // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
7068           if (OpInfo.isIndirect) {
7069             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
7070             emitInlineAsmError(CS, "inline asm not supported yet:"
7071                                    " don't know how to handle tied "
7072                                    "indirect register inputs");
7073             return;
7074           }
7075 
7076           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
7077           SmallVector<unsigned, 4> Regs;
7078 
7079           if (!createVirtualRegs(Regs,
7080                                  InlineAsm::getNumOperandRegisters(OpFlag),
7081                                  RegVT, DAG)) {
7082             emitInlineAsmError(CS, "inline asm error: This value type register "
7083                                    "class is not natively supported!");
7084             return;
7085           }
7086 
7087           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
7088 
7089           SDLoc dl = getCurSDLoc();
7090           // Use the produced MatchedRegs object to copy the input into the registers.
7091           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl,
7092                                     Chain, &Flag, CS.getInstruction());
7093           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
7094                                            true, OpInfo.getMatchedOperand(), dl,
7095                                            DAG, AsmNodeOperands);
7096           break;
7097         }
7098 
7099         assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
7100         assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
7101                "Unexpected number of operands");
7102         // Add information to the INLINEASM node to know about this input.
7103         // See InlineAsm.h isUseOperandTiedToDef.
7104         OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
7105         OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
7106                                                     OpInfo.getMatchedOperand());
7107         AsmNodeOperands.push_back(DAG.getTargetConstant(
7108             OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7109         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
7110         break;
7111       }
7112 
7113       // Treat indirect 'X' constraint as memory.
7114       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
7115           OpInfo.isIndirect)
7116         OpInfo.ConstraintType = TargetLowering::C_Memory;
7117 
7118       if (OpInfo.ConstraintType == TargetLowering::C_Other) {
7119         std::vector<SDValue> Ops;
7120         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
7121                                           Ops, DAG);
7122         if (Ops.empty()) {
7123           emitInlineAsmError(CS, "invalid operand for inline asm constraint '" +
7124                                      Twine(OpInfo.ConstraintCode) + "'");
7125           return;
7126         }
7127 
7128         // Add information to the INLINEASM node to know about this input.
7129         unsigned ResOpType =
7130           InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
7131         AsmNodeOperands.push_back(DAG.getTargetConstant(
7132             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7133         AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
7134         break;
7135       }
7136 
7137       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
7138         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
7139         assert(InOperandVal.getValueType() ==
7140                    TLI.getPointerTy(DAG.getDataLayout()) &&
7141                "Memory operands expect pointer values");
7142 
7143         unsigned ConstraintID =
7144             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
7145         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
7146                "Failed to convert memory constraint code to constraint id.");
7147 
7148         // Add information to the INLINEASM node to know about this input.
7149         unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
7150         ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
7151         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
7152                                                         getCurSDLoc(),
7153                                                         MVT::i32));
7154         AsmNodeOperands.push_back(InOperandVal);
7155         break;
7156       }
7157 
7158       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
7159               OpInfo.ConstraintType == TargetLowering::C_Register) &&
7160              "Unknown constraint type!");
7161 
7162       // TODO: Support this.
7163       if (OpInfo.isIndirect) {
7164         emitInlineAsmError(
7165             CS, "Don't know how to handle indirect register inputs yet "
7166                 "for constraint '" +
7167                     Twine(OpInfo.ConstraintCode) + "'");
7168         return;
7169       }
7170 
7171       // Copy the input into the appropriate registers.
7172       if (OpInfo.AssignedRegs.Regs.empty()) {
7173         emitInlineAsmError(CS, "couldn't allocate input reg for constraint '" +
7174                                    Twine(OpInfo.ConstraintCode) + "'");
7175         return;
7176       }
7177 
7178       SDLoc dl = getCurSDLoc();
7179 
7180       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl,
7181                                         Chain, &Flag, CS.getInstruction());
7182 
7183       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
7184                                                dl, DAG, AsmNodeOperands);
7185       break;
7186     }
7187     case InlineAsm::isClobber: {
7188       // Add the clobbered value to the operand list, so that the register
7189       // allocator is aware that the physreg got clobbered.
7190       if (!OpInfo.AssignedRegs.Regs.empty())
7191         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
7192                                                  false, 0, getCurSDLoc(), DAG,
7193                                                  AsmNodeOperands);
7194       break;
7195     }
7196     }
7197   }
7198 
7199   // Finish up input operands.  Set the input chain and add the flag last.
7200   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
7201   if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
7202 
7203   Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
7204                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
7205   Flag = Chain.getValue(1);
7206 
7207   // If this asm returns a register value, copy the result from that register
7208   // and set it as the value of the call.
7209   if (!RetValRegs.Regs.empty()) {
7210     SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
7211                                              Chain, &Flag, CS.getInstruction());
7212 
7213     // FIXME: Why don't we do this for inline asms with MRVs?
7214     if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
7215       EVT ResultType = TLI.getValueType(DAG.getDataLayout(), CS.getType());
7216 
7217       // If any of the results of the inline asm is a vector, it may have the
7218       // wrong width/num elts.  This can happen for register classes that can
7219       // contain multiple different value types.  The preg or vreg allocated may
7220       // not have the same VT as was expected.  Convert it to the right type
7221       // with bit_convert.
7222       if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
7223         Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(),
7224                           ResultType, Val);
7225 
7226       } else if (ResultType != Val.getValueType() &&
7227                  ResultType.isInteger() && Val.getValueType().isInteger()) {
7228         // If a result value was tied to an input value, the computed result may
7229         // have a wider width than the expected result.  Extract the relevant
7230         // portion.
7231         Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val);
7232       }
7233 
7234       assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
7235     }
7236 
7237     setValue(CS.getInstruction(), Val);
7238     // Don't need to use this as a chain in this case.
7239     if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
7240       return;
7241   }
7242 
7243   std::vector<std::pair<SDValue, const Value *> > StoresToEmit;
7244 
7245   // Process indirect outputs, first output all of the flagged copies out of
7246   // physregs.
7247   for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
7248     RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
7249     const Value *Ptr = IndirectStoresToEmit[i].second;
7250     SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
7251                                              Chain, &Flag, IA);
7252     StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
7253   }
7254 
7255   // Emit the non-flagged stores from the physregs.
7256   SmallVector<SDValue, 8> OutChains;
7257   for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
7258     SDValue Val = DAG.getStore(Chain, getCurSDLoc(), StoresToEmit[i].first,
7259                                getValue(StoresToEmit[i].second),
7260                                MachinePointerInfo(StoresToEmit[i].second));
7261     OutChains.push_back(Val);
7262   }
7263 
7264   if (!OutChains.empty())
7265     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
7266 
7267   DAG.setRoot(Chain);
7268 }
7269 
7270 void SelectionDAGBuilder::emitInlineAsmError(ImmutableCallSite CS,
7271                                              const Twine &Message) {
7272   LLVMContext &Ctx = *DAG.getContext();
7273   Ctx.emitError(CS.getInstruction(), Message);
7274 
7275   // Make sure we leave the DAG in a valid state
7276   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7277   auto VT = TLI.getValueType(DAG.getDataLayout(), CS.getType());
7278   setValue(CS.getInstruction(), DAG.getUNDEF(VT));
7279 }
7280 
7281 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
7282   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
7283                           MVT::Other, getRoot(),
7284                           getValue(I.getArgOperand(0)),
7285                           DAG.getSrcValue(I.getArgOperand(0))));
7286 }
7287 
7288 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
7289   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7290   const DataLayout &DL = DAG.getDataLayout();
7291   SDValue V = DAG.getVAArg(TLI.getValueType(DL, I.getType()),
7292                            getCurSDLoc(), getRoot(), getValue(I.getOperand(0)),
7293                            DAG.getSrcValue(I.getOperand(0)),
7294                            DL.getABITypeAlignment(I.getType()));
7295   setValue(&I, V);
7296   DAG.setRoot(V.getValue(1));
7297 }
7298 
7299 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
7300   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
7301                           MVT::Other, getRoot(),
7302                           getValue(I.getArgOperand(0)),
7303                           DAG.getSrcValue(I.getArgOperand(0))));
7304 }
7305 
7306 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
7307   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
7308                           MVT::Other, getRoot(),
7309                           getValue(I.getArgOperand(0)),
7310                           getValue(I.getArgOperand(1)),
7311                           DAG.getSrcValue(I.getArgOperand(0)),
7312                           DAG.getSrcValue(I.getArgOperand(1))));
7313 }
7314 
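/// Wrap \p Op in an AssertZext when !range metadata on \p I proves the value
/// fits in a narrower type.  E.g. a range of [0, 256) has an unsigned max of
/// 255, so the result is tagged AssertZext with value type i8.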
7315 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
7316                                                     const Instruction &I,
7317                                                     SDValue Op) {
7318   const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
7319   if (!Range)
7320     return Op;
7321 
7322   ConstantRange CR = getConstantRangeFromMetadata(*Range);
7323   if (CR.isFullSet() || CR.isEmptySet() || CR.isWrappedSet())
7324     return Op;
7325 
7326   APInt Lo = CR.getUnsignedMin();
7327   if (!Lo.isMinValue())
7328     return Op;
7329 
7330   APInt Hi = CR.getUnsignedMax();
7331   unsigned Bits = Hi.getActiveBits();
7332 
7333   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
7334 
7335   SDLoc SL = getCurSDLoc();
7336 
7337   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
7338                              DAG.getValueType(SmallVT));
7339   unsigned NumVals = Op.getNode()->getNumValues();
7340   if (NumVals == 1)
7341     return ZExt;
7342 
7343   SmallVector<SDValue, 4> Ops;
7344 
7345   Ops.push_back(ZExt);
7346   for (unsigned I = 1; I != NumVals; ++I)
7347     Ops.push_back(Op.getValue(I));
7348 
7349   return DAG.getMergeValues(Ops, SL);
7350 }
7351 
7352 /// \brief Populate a CallLoweringInfo (into \p CLI) based on the properties of
7353 /// the call being lowered.
7354 ///
7355 /// This is a helper for lowering intrinsics that follow a target calling
7356 /// convention or require stack pointer adjustment. Only a subset of the
7357 /// intrinsic's operands need to participate in the calling convention.
7358 void SelectionDAGBuilder::populateCallLoweringInfo(
7359     TargetLowering::CallLoweringInfo &CLI, ImmutableCallSite CS,
7360     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
7361     bool IsPatchPoint) {
7362   TargetLowering::ArgListTy Args;
7363   Args.reserve(NumArgs);
7364 
7365   // Populate the argument list.
7366   // Attributes for args start at offset 1, after the return attribute.
7367   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
7368        ArgI != ArgE; ++ArgI) {
7369     const Value *V = CS->getOperand(ArgI);
7370 
7371     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
7372 
7373     TargetLowering::ArgListEntry Entry;
7374     Entry.Node = getValue(V);
7375     Entry.Ty = V->getType();
7376     Entry.setAttributes(&CS, ArgI);
7377     Args.push_back(Entry);
7378   }
7379 
7380   CLI.setDebugLoc(getCurSDLoc())
7381       .setChain(getRoot())
7382       .setCallee(CS.getCallingConv(), ReturnTy, Callee, std::move(Args))
7383       .setDiscardResult(CS->use_empty())
7384       .setIsPatchPoint(IsPatchPoint);
7385 }
7386 
7387 /// \brief Add a stack map intrinsic call's live variable operands to a stackmap
7388 /// or patchpoint target node's operand list.
7389 ///
7390 /// Constants are converted to TargetConstants purely as an optimization to
7391 /// avoid constant materialization and register allocation.
7392 ///
7393 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
7394 /// generate address computation nodes, and so ExpandISelPseudo can convert the
7395 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
7396 /// address materialization and register allocation, but may also be required
7397 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
7398 /// alloca in the entry block, then the runtime may assume that the alloca's
7399 /// StackMap location can be read immediately after compilation and that the
7400 /// location is valid at any point during execution (this is similar to the
7401 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
7402 /// only available in a register, then the runtime would need to trap when
7403 /// execution reaches the StackMap in order to read the alloca's location.
7404 static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
7405                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
7406                                 SelectionDAGBuilder &Builder) {
7407   for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
7408     SDValue OpVal = Builder.getValue(CS.getArgument(i));
7409     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
7410       Ops.push_back(
7411         Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
7412       Ops.push_back(
7413         Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
7414     } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
7415       const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
7416       Ops.push_back(Builder.DAG.getTargetFrameIndex(
7417           FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
7418     } else
7419       Ops.push_back(OpVal);
7420   }
7421 }
7422 
7423 /// \brief Lower llvm.experimental.stackmap directly to its target opcode.
7424 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
7425   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
7426   //                                  [live variables...])
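  //
  // For example:
  //   call void @llvm.experimental.stackmap(i64 42, i32 8, i32 %x, float %y)
  // records the locations of %x and %y at this site and requests an 8-byte
  // nop shadow after it.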
7427 
7428   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
7429 
7430   SDValue Chain, InFlag, Callee, NullPtr;
7431   SmallVector<SDValue, 32> Ops;
7432 
7433   SDLoc DL = getCurSDLoc();
7434   Callee = getValue(CI.getCalledValue());
7435   NullPtr = DAG.getIntPtrConstant(0, DL, true);
7436 
7437   // The stackmap intrinsic only records the live variables (the arguments
7438   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
7439   // intrinsic, this won't be lowered to a function call. This means we don't
7440   // have to worry about calling conventions and target specific lowering code.
7441   // Instead we perform the call lowering right here.
7442   //
7443   // chain, flag = CALLSEQ_START(chain, 0, 0)
7444   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
7445   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
7446   //
7447   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
7448   InFlag = Chain.getValue(1);
7449 
7450   // Add the <id> and <numBytes> constants.
7451   SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
7452   Ops.push_back(DAG.getTargetConstant(
7453                   cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
7454   SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
7455   Ops.push_back(DAG.getTargetConstant(
7456                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
7457                   MVT::i32));
7458 
7459   // Push live variables for the stack map.
7460   addStackMapLiveVars(&CI, 2, DL, Ops, *this);
7461 
7462   // We are not pushing any register mask info here on the operands list,
7463   // because the stackmap doesn't clobber anything.
7464 
7465   // Push the chain and the glue flag.
7466   Ops.push_back(Chain);
7467   Ops.push_back(InFlag);
7468 
7469   // Create the STACKMAP node.
7470   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7471   SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
7472   Chain = SDValue(SM, 0);
7473   InFlag = Chain.getValue(1);
7474 
7475   Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
7476 
7477   // Stackmaps don't generate values, so nothing goes into the NodeMap.
7478 
7479   // Set the root to the target-lowered call chain.
7480   DAG.setRoot(Chain);
7481 
7482   // Inform the Frame Information that we have a stackmap in this function.
7483   FuncInfo.MF->getFrameInfo().setHasStackMap();
7484 }
7485 
7486 /// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
7487 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
7488                                           const BasicBlock *EHPadBB) {
7489   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
7490   //                                                 i32 <numBytes>,
7491   //                                                 i8* <target>,
7492   //                                                 i32 <numArgs>,
7493   //                                                 [Args...],
7494   //                                                 [live variables...])
7495 
7496   CallingConv::ID CC = CS.getCallingConv();
7497   bool IsAnyRegCC = CC == CallingConv::AnyReg;
7498   bool HasDef = !CS->getType()->isVoidTy();
7499   SDLoc dl = getCurSDLoc();
7500   SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));
7501 
7502   // Handle immediate and symbolic callees.
7503   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
7504     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
7505                                    /*isTarget=*/true);
7506   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
7507     Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
7508                                          SDLoc(SymbolicCallee),
7509                                          SymbolicCallee->getValueType(0));
7510 
7511   // Get the real number of arguments participating in the call <numArgs>
7512   SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
7513   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
7514 
7515   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
7516   // Intrinsics include all meta-operands up to but not including CC.
7517   unsigned NumMetaOpers = PatchPointOpers::CCPos;
7518   assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
7519          "Not enough arguments provided to the patchpoint intrinsic");
7520 
7521   // For AnyRegCC the arguments are lowered later on manually.
7522   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
7523   Type *ReturnTy =
7524     IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType();
7525 
7526   TargetLowering::CallLoweringInfo CLI(DAG);
7527   populateCallLoweringInfo(CLI, CS, NumMetaOpers, NumCallArgs, Callee, ReturnTy,
7528                            true);
7529   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
7530 
7531   SDNode *CallEnd = Result.second.getNode();
7532   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
7533     CallEnd = CallEnd->getOperand(0).getNode();
7534 
7535   // Get a call instruction from the call sequence chain.
7536   // Tail calls are not allowed.
7537   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
7538          "Expected a callseq node.");
7539   SDNode *Call = CallEnd->getOperand(0).getNode();
7540   bool HasGlue = Call->getGluedNode();
7541 
7542   // Replace the target specific call node with the patchable intrinsic.
7543   SmallVector<SDValue, 8> Ops;
7544 
7545   // Add the <id> and <numBytes> constants.
7546   SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
7547   Ops.push_back(DAG.getTargetConstant(
7548                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
7549   SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
7550   Ops.push_back(DAG.getTargetConstant(
7551                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
7552                   MVT::i32));
7553 
7554   // Add the callee.
7555   Ops.push_back(Callee);
7556 
7557   // Adjust <numArgs> to account for any arguments that have been passed on the
7558   // stack instead.
7559   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
7560   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
7561   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
7562   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
7563 
7564   // Add the calling convention
7565   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
7566 
7567   // Add the arguments we omitted previously. The register allocator should
7568   // place these in any free register.
7569   if (IsAnyRegCC)
7570     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
7571       Ops.push_back(getValue(CS.getArgument(i)));
7572 
7573   // Push the arguments from the call instruction up to the register mask.
7574   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
7575   Ops.append(Call->op_begin() + 2, e);
7576 
7577   // Push live variables for the stack map.
7578   addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);
7579 
7580   // Push the register mask info.
7581   if (HasGlue)
7582     Ops.push_back(*(Call->op_end()-2));
7583   else
7584     Ops.push_back(*(Call->op_end()-1));
7585 
7586   // Push the chain (this is originally the first operand of the call, but
7587   // now becomes the last or second-to-last operand).
7588   Ops.push_back(*(Call->op_begin()));
7589 
7590   // Push the glue flag (last operand).
7591   if (HasGlue)
7592     Ops.push_back(*(Call->op_end()-1));
7593 
7594   SDVTList NodeTys;
7595   if (IsAnyRegCC && HasDef) {
7596     // Create the return types based on the intrinsic definition
7597     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7598     SmallVector<EVT, 3> ValueVTs;
7599     ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
7600     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
7601 
7602     // There is always a chain and a glue type at the end
7603     ValueVTs.push_back(MVT::Other);
7604     ValueVTs.push_back(MVT::Glue);
7605     NodeTys = DAG.getVTList(ValueVTs);
7606   } else
7607     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7608 
7609   // Replace the target specific call node with a PATCHPOINT node.
7610   MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
7611                                          dl, NodeTys, Ops);
7612 
7613   // Update the NodeMap.
7614   if (HasDef) {
7615     if (IsAnyRegCC)
7616       setValue(CS.getInstruction(), SDValue(MN, 0));
7617     else
7618       setValue(CS.getInstruction(), Result.first);
7619   }
7620 
7621   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
7622   // call sequence. Furthermore the location of the chain and glue can change
7623   // when the AnyReg calling convention is used and the intrinsic returns a
7624   // value.
7625   if (IsAnyRegCC && HasDef) {
7626     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
7627     SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
7628     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
7629   } else
7630     DAG.ReplaceAllUsesWith(Call, MN);
7631   DAG.DeleteNode(Call);
7632 
7633   // Inform the Frame Information that we have a patchpoint in this function.
7634   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
7635 }
7636 
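/// Lower the llvm.experimental.vector.reduce.* intrinsics to their VECREDUCE_*
/// nodes.  E.g. (type mangling elided):
///   %s = call i32 @llvm.experimental.vector.reduce.add(<4 x i32> %v)
/// becomes a single VECREDUCE_ADD node over %v.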
7637 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
7638                                             unsigned Intrinsic) {
7639   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7640   SDValue Op1 = getValue(I.getArgOperand(0));
7641   SDValue Op2;
7642   if (I.getNumArgOperands() > 1)
7643     Op2 = getValue(I.getArgOperand(1));
7644   SDLoc dl = getCurSDLoc();
7645   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7646   SDValue Res;
7647   FastMathFlags FMF;
7648   if (isa<FPMathOperator>(I))
7649     FMF = I.getFastMathFlags();
7650   SDNodeFlags SDFlags;
7651   SDFlags.setNoNaNs(FMF.noNaNs());
7652 
7653   switch (Intrinsic) {
7654   case Intrinsic::experimental_vector_reduce_fadd:
7655     if (FMF.unsafeAlgebra())
7656       Res = DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2);
7657     else
7658       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
7659     break;
7660   case Intrinsic::experimental_vector_reduce_fmul:
7661     if (FMF.unsafeAlgebra())
7662       Res = DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2);
7663     else
7664       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
7665     break;
7666   case Intrinsic::experimental_vector_reduce_add:
7667     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
7668     break;
7669   case Intrinsic::experimental_vector_reduce_mul:
7670     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
7671     break;
7672   case Intrinsic::experimental_vector_reduce_and:
7673     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
7674     break;
7675   case Intrinsic::experimental_vector_reduce_or:
7676     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
7677     break;
7678   case Intrinsic::experimental_vector_reduce_xor:
7679     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
7680     break;
7681   case Intrinsic::experimental_vector_reduce_smax:
7682     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
7683     break;
7684   case Intrinsic::experimental_vector_reduce_smin:
7685     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
7686     break;
7687   case Intrinsic::experimental_vector_reduce_umax:
7688     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
7689     break;
7690   case Intrinsic::experimental_vector_reduce_umin:
7691     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
7692     break;
7693   case Intrinsic::experimental_vector_reduce_fmax: {
7694     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
7695     break;
7696   }
7697   case Intrinsic::experimental_vector_reduce_fmin: {
7698     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
7699     break;
7700   }
7701   default:
7702     llvm_unreachable("Unhandled vector reduce intrinsic");
7703   }
7704   setValue(&I, Res);
7705 }
7706 
7707 /// Returns an AttributeList representing the attributes applied to the return
7708 /// value of the given call.
7709 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
7710   SmallVector<Attribute::AttrKind, 2> Attrs;
7711   if (CLI.RetSExt)
7712     Attrs.push_back(Attribute::SExt);
7713   if (CLI.RetZExt)
7714     Attrs.push_back(Attribute::ZExt);
7715   if (CLI.IsInReg)
7716     Attrs.push_back(Attribute::InReg);
7717 
7718   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
7719                             Attrs);
7720 }
7721 
7722 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
7723 /// implementation, which just calls LowerCall.
7724 /// FIXME: When all targets are
7725 /// migrated to using LowerCall, this hook should be integrated into SDISel.
7726 std::pair<SDValue, SDValue>
7727 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
7728   // Handle the incoming return values from the call.
7729   CLI.Ins.clear();
7730   Type *OrigRetTy = CLI.RetTy;
7731   SmallVector<EVT, 4> RetTys;
7732   SmallVector<uint64_t, 4> Offsets;
7733   auto &DL = CLI.DAG.getDataLayout();
7734   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
7735 
7736   SmallVector<ISD::OutputArg, 4> Outs;
7737   GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
7738 
7739   bool CanLowerReturn =
7740       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
7741                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
7742 
7743   SDValue DemoteStackSlot;
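  // Note: -100 is just an obviously-invalid placeholder; the index only
  // becomes meaningful if the return value has to be demoted to a stack slot.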
7744   int DemoteStackIdx = -100;
7745   if (!CanLowerReturn) {
7746     // FIXME: equivalent assert?
7747     // assert(!CS.hasInAllocaArgument() &&
7748     //        "sret demotion is incompatible with inalloca");
7749     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
7750     unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
7751     MachineFunction &MF = CLI.DAG.getMachineFunction();
7752     DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
7753     Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy);
7754 
7755     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
7756     ArgListEntry Entry;
7757     Entry.Node = DemoteStackSlot;
7758     Entry.Ty = StackSlotPtrType;
7759     Entry.IsSExt = false;
7760     Entry.IsZExt = false;
7761     Entry.IsInReg = false;
7762     Entry.IsSRet = true;
7763     Entry.IsNest = false;
7764     Entry.IsByVal = false;
7765     Entry.IsReturned = false;
7766     Entry.IsSwiftSelf = false;
7767     Entry.IsSwiftError = false;
7768     Entry.Alignment = Align;
7769     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
7770     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
7771 
7772     // sret demotion isn't compatible with tail-calls, since the sret argument
7773     // points into the caller's stack frame.
7774     CLI.IsTailCall = false;
7775   } else {
7776     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
7777       EVT VT = RetTys[I];
7778       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
7779       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
7780       for (unsigned i = 0; i != NumRegs; ++i) {
7781         ISD::InputArg MyFlags;
7782         MyFlags.VT = RegisterVT;
7783         MyFlags.ArgVT = VT;
7784         MyFlags.Used = CLI.IsReturnValueUsed;
7785         if (CLI.RetSExt)
7786           MyFlags.Flags.setSExt();
7787         if (CLI.RetZExt)
7788           MyFlags.Flags.setZExt();
7789         if (CLI.IsInReg)
7790           MyFlags.Flags.setInReg();
7791         CLI.Ins.push_back(MyFlags);
7792       }
7793     }
7794   }
7795 
7796   // We push in swifterror return as the last element of CLI.Ins.
7797   ArgListTy &Args = CLI.getArgs();
7798   if (supportSwiftError()) {
7799     for (unsigned i = 0, e = Args.size(); i != e; ++i) {
7800       if (Args[i].IsSwiftError) {
7801         ISD::InputArg MyFlags;
7802         MyFlags.VT = getPointerTy(DL);
7803         MyFlags.ArgVT = EVT(getPointerTy(DL));
7804         MyFlags.Flags.setSwiftError();
7805         CLI.Ins.push_back(MyFlags);
7806       }
7807     }
7808   }
7809 
7810   // Handle all of the outgoing arguments.
7811   CLI.Outs.clear();
7812   CLI.OutVals.clear();
7813   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
7814     SmallVector<EVT, 4> ValueVTs;
7815     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
7816     Type *FinalType = Args[i].Ty;
7817     if (Args[i].IsByVal)
7818       FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
7819     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
7820         FinalType, CLI.CallConv, CLI.IsVarArg);
7821     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
7822          ++Value) {
7823       EVT VT = ValueVTs[Value];
7824       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
7825       SDValue Op = SDValue(Args[i].Node.getNode(),
7826                            Args[i].Node.getResNo() + Value);
7827       ISD::ArgFlagsTy Flags;
7828       unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
7829 
7830       if (Args[i].IsZExt)
7831         Flags.setZExt();
7832       if (Args[i].IsSExt)
7833         Flags.setSExt();
7834       if (Args[i].IsInReg) {
7835         // Under the vectorcall calling convention, a structure passed InReg
7836         // must be an HVA (homogeneous vector aggregate).
7837         if (CLI.CallConv == CallingConv::X86_VectorCall &&
7838             isa<StructType>(FinalType)) {
7839           // The first value of the structure is additionally marked HvaStart.
7840           if (0 == Value)
7841             Flags.setHvaStart();
7842           Flags.setHva();
7843         }
7844         // Set InReg Flag
7845         Flags.setInReg();
7846       }
7847       if (Args[i].IsSRet)
7848         Flags.setSRet();
7849       if (Args[i].IsSwiftSelf)
7850         Flags.setSwiftSelf();
7851       if (Args[i].IsSwiftError)
7852         Flags.setSwiftError();
7853       if (Args[i].IsByVal)
7854         Flags.setByVal();
7855       if (Args[i].IsInAlloca) {
7856         Flags.setInAlloca();
7857         // Set the byval flag for CCAssignFn callbacks that don't know about
7858         // inalloca.  This way we can know how many bytes we should've allocated
7859         // and how many bytes a callee cleanup function will pop.  If we port
7860         // inalloca to more targets, we'll have to add custom inalloca handling
7861         // in the various CC lowering callbacks.
7862         Flags.setByVal();
7863       }
7864       if (Args[i].IsByVal || Args[i].IsInAlloca) {
7865         PointerType *Ty = cast<PointerType>(Args[i].Ty);
7866         Type *ElementTy = Ty->getElementType();
7867         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
7868         // For ByVal, the alignment should come from the frontend; the backend
7869         // will guess when it is missing, but some cases it cannot get right.
7870         unsigned FrameAlign;
7871         if (Args[i].Alignment)
7872           FrameAlign = Args[i].Alignment;
7873         else
7874           FrameAlign = getByValTypeAlignment(ElementTy, DL);
7875         Flags.setByValAlign(FrameAlign);
7876       }
7877       if (Args[i].IsNest)
7878         Flags.setNest();
7879       if (NeedsRegBlock)
7880         Flags.setInConsecutiveRegs();
7881       Flags.setOrigAlign(OriginalAlignment);
7882 
7883       MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
7884       unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
7885       SmallVector<SDValue, 4> Parts(NumParts);
7886       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
7887 
7888       if (Args[i].IsSExt)
7889         ExtendKind = ISD::SIGN_EXTEND;
7890       else if (Args[i].IsZExt)
7891         ExtendKind = ISD::ZERO_EXTEND;
7892 
7893       // Conservatively only handle 'returned' on non-vectors for now
7894       if (Args[i].IsReturned && !Op.getValueType().isVector()) {
7895         assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
7896                "unexpected use of 'returned'");
7897         // Before passing 'returned' to the target lowering code, ensure that
7898         // either the register MVT and the actual EVT are the same size or that
7899         // the return value and argument are extended in the same way; in these
7900         // cases it's safe to pass the argument register value unchanged as the
7901         // return register value (although it's at the target's option whether
7902         // to do so)
7903         // TODO: allow code generation to take advantage of partially preserved
7904         // registers rather than clobbering the entire register when the
7905         // parameter extension method is not compatible with the return
7906         // extension method
7907         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
7908             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
7909              CLI.RetZExt == Args[i].IsZExt))
7910           Flags.setReturned();
7911       }
7912 
7913       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
7914                      CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
7915 
7916       for (unsigned j = 0; j != NumParts; ++j) {
7917         // If this isn't the first piece, the alignment is forced to 1 below.
7918         ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
7919                                i < CLI.NumFixedArgs,
7920                                i, j*Parts[j].getValueType().getStoreSize());
7921         if (NumParts > 1 && j == 0)
7922           MyFlags.Flags.setSplit();
7923         else if (j != 0) {
7924           MyFlags.Flags.setOrigAlign(1);
7925           if (j == NumParts - 1)
7926             MyFlags.Flags.setSplitEnd();
7927         }
7928 
7929         CLI.Outs.push_back(MyFlags);
7930         CLI.OutVals.push_back(Parts[j]);
7931       }
7932 
7933       if (NeedsRegBlock && Value == NumValues - 1)
7934         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
7935     }
7936   }
7937 
7938   SmallVector<SDValue, 4> InVals;
7939   CLI.Chain = LowerCall(CLI, InVals);
7940 
7941   // Update CLI.InVals for use outside of this function.
7942   CLI.InVals = InVals;
7943 
7944   // Verify that the target's LowerCall behaved as expected.
7945   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
7946          "LowerCall didn't return a valid chain!");
7947   assert((!CLI.IsTailCall || InVals.empty()) &&
7948          "LowerCall emitted a return value for a tail call!");
7949   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
7950          "LowerCall didn't emit the correct number of values!");
7951 
7952   // For a tail call, the return value is merely live-out and there aren't
7953   // any nodes in the DAG representing it. Return a special value to
7954   // indicate that a tail call has been emitted and no more Instructions
7955   // should be processed in the current block.
7956   if (CLI.IsTailCall) {
7957     CLI.DAG.setRoot(CLI.Chain);
7958     return std::make_pair(SDValue(), SDValue());
7959   }
7960 
7961 #ifndef NDEBUG
7962   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
7963     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
7964     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
7965            "LowerCall emitted a value with the wrong type!");
7966   }
7967 #endif
7968 
7969   SmallVector<SDValue, 4> ReturnValues;
7970   if (!CanLowerReturn) {
7971     // The instruction result is the result of loading from the
7972     // hidden sret parameter.
7973     SmallVector<EVT, 1> PVTs;
7974     Type *PtrRetTy = PointerType::getUnqual(OrigRetTy);
7975 
7976     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
7977     assert(PVTs.size() == 1 && "Pointers should fit in one register");
7978     EVT PtrVT = PVTs[0];
7979 
7980     unsigned NumValues = RetTys.size();
7981     ReturnValues.resize(NumValues);
7982     SmallVector<SDValue, 4> Chains(NumValues);
7983 
7984     // An aggregate return value cannot wrap around the address space, so
7985     // offsets to its parts don't wrap either.
7986     SDNodeFlags Flags;
7987     Flags.setNoUnsignedWrap(true);
7988 
7989     for (unsigned i = 0; i < NumValues; ++i) {
7990       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
7991                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
7992                                                         PtrVT), Flags);
7993       SDValue L = CLI.DAG.getLoad(
7994           RetTys[i], CLI.DL, CLI.Chain, Add,
7995           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
7996                                             DemoteStackIdx, Offsets[i]),
7997           /* Alignment = */ 1);
7998       ReturnValues[i] = L;
7999       Chains[i] = L.getValue(1);
8000     }
8001 
8002     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
8003   } else {
8004     // Collect the legal value parts into potentially illegal values
8005     // that correspond to the original function's return values.
8006     Optional<ISD::NodeType> AssertOp;
8007     if (CLI.RetSExt)
8008       AssertOp = ISD::AssertSext;
8009     else if (CLI.RetZExt)
8010       AssertOp = ISD::AssertZext;
8011     unsigned CurReg = 0;
8012     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
8013       EVT VT = RetTys[I];
8014       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
8015       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
8016 
8017       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
8018                                               NumRegs, RegisterVT, VT, nullptr,
8019                                               AssertOp));
8020       CurReg += NumRegs;
8021     }
8022 
8023     // For a function returning void, there is no return value. We can't create
8024     // such a node, so we just return a null return value in that case. In
8025     // that case, nothing will actually look at the value.
8026     if (ReturnValues.empty())
8027       return std::make_pair(SDValue(), CLI.Chain);
8028   }
8029 
8030   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
8031                                 CLI.DAG.getVTList(RetTys), ReturnValues);
8032   return std::make_pair(Res, CLI.Chain);
8033 }
8034 
8035 void TargetLowering::LowerOperationWrapper(SDNode *N,
8036                                            SmallVectorImpl<SDValue> &Results,
8037                                            SelectionDAG &DAG) const {
8038   if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
8039     Results.push_back(Res);
8040 }
8041 
8042 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
8043   llvm_unreachable("LowerOperation not implemented for this target!");
8044 }
8045 
8046 void
8047 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
8048   SDValue Op = getNonRegisterValue(V);
8049   assert((Op.getOpcode() != ISD::CopyFromReg ||
8050           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
8051          "Copy from a reg to the same reg!");
8052   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
8053 
8054   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8055   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
8056                    V->getType());
8057   SDValue Chain = DAG.getEntryNode();
8058 
8059   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
8060                               FuncInfo.PreferredExtendType.end())
8061                                  ? ISD::ANY_EXTEND
8062                                  : FuncInfo.PreferredExtendType[V];
8063   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
8064   PendingExports.push_back(Chain);
8065 }
8066 
8067 #include "llvm/CodeGen/SelectionDAGISel.h"
8068 
8069 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
8070 /// entry block, return true.  Arguments used by a switch do not count as
8071 /// entry-only, since the switch may expand into multiple basic blocks.
8072 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
8073   // With FastISel active, we may be splitting blocks, so force creation
8074   // of virtual registers for all non-dead arguments.
8075   if (FastISel)
8076     return A->use_empty();
8077 
8078   const BasicBlock &Entry = A->getParent()->front();
8079   for (const User *U : A->users())
8080     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
8081       return false;  // Use not in entry block.
8082 
8083   return true;
8084 }
8085 
8086 typedef DenseMap<const Argument *,
8087                  std::pair<const AllocaInst *, const StoreInst *>>
8088     ArgCopyElisionMapTy;
8089 
8090 /// Scan the entry block of the function in FuncInfo for arguments that look
8091 /// like copies into a local alloca. Record any copied arguments in
8092 /// ArgCopyElisionCandidates.
8093 static void
8094 findArgumentCopyElisionCandidates(const DataLayout &DL,
8095                                   FunctionLoweringInfo *FuncInfo,
8096                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
8097   // Record the state of every static alloca used in the entry block. Argument
8098   // allocas are all used in the entry block, so we need approximately as many
8099   // entries as we have arguments.
8100   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
8101   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
8102   unsigned NumArgs = FuncInfo->Fn->arg_size();
8103   StaticAllocas.reserve(NumArgs * 2);
8104 
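  // If V (after stripping pointer casts) is a static alloca tracked in
  // StaticAllocaMap, return its state slot, creating an Unknown entry on
  // first sight; otherwise return null.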
8105   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
8106     if (!V)
8107       return nullptr;
8108     V = V->stripPointerCasts();
8109     const auto *AI = dyn_cast<AllocaInst>(V);
8110     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
8111       return nullptr;
8112     auto Iter = StaticAllocas.insert({AI, Unknown});
8113     return &Iter.first->second;
8114   };
8115 
8116   // Look for stores of arguments to static allocas. Look through bitcasts and
8117   // GEPs to handle type coercions, as long as the alloca is fully initialized
8118   // by the store. Any non-store use of an alloca escapes it and any subsequent
8119   // unanalyzed store might write it.
8120   // FIXME: Handle structs initialized with multiple stores.
8121   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
8122     // Look for stores, and handle non-store uses conservatively.
8123     const auto *SI = dyn_cast<StoreInst>(&I);
8124     if (!SI) {
8125       // We will look through cast uses, so ignore them completely.
8126       if (I.isCast())
8127         continue;
8128       // Ignore debug info intrinsics; they don't escape or store to allocas.
8129       if (isa<DbgInfoIntrinsic>(I))
8130         continue;
8131       // This is an unknown instruction. Assume it escapes or writes to all
8132       // static alloca operands.
8133       for (const Use &U : I.operands()) {
8134         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
8135           *Info = StaticAllocaInfo::Clobbered;
8136       }
8137       continue;
8138     }
8139 
8140     // If the stored value is a static alloca, mark it as escaped.
8141     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
8142       *Info = StaticAllocaInfo::Clobbered;
8143 
8144     // Check if the destination is a static alloca.
8145     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
8146     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
8147     if (!Info)
8148       continue;
8149     const AllocaInst *AI = cast<AllocaInst>(Dst);
8150 
8151     // Skip allocas that have been initialized or clobbered.
8152     if (*Info != StaticAllocaInfo::Unknown)
8153       continue;
8154 
8155     // Check if the stored value is an argument, and that this store fully
8156     // initializes the alloca. Don't elide copies from the same argument twice.
8157     const Value *Val = SI->getValueOperand()->stripPointerCasts();
8158     const auto *Arg = dyn_cast<Argument>(Val);
8159     if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() ||
8160         Arg->getType()->isEmptyTy() ||
8161         DL.getTypeStoreSize(Arg->getType()) !=
8162             DL.getTypeAllocSize(AI->getAllocatedType()) ||
8163         ArgCopyElisionCandidates.count(Arg)) {
8164       *Info = StaticAllocaInfo::Clobbered;
8165       continue;
8166     }
8167 
8168     DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI << '\n');
8169 
8170     // Mark this alloca and store for argument copy elision.
8171     *Info = StaticAllocaInfo::Elidable;
8172     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
8173 
8174     // Stop scanning if we've seen all arguments. This will happen early in -O0
8175     // builds, which is useful, because -O0 builds have large entry blocks and
8176     // many allocas.
8177     if (ArgCopyElisionCandidates.size() == NumArgs)
8178       break;
8179   }
8180 }
8181 
8182 /// Try to elide argument copies from memory into a local alloca. Succeeds if
8183 /// ArgVal is a load from a suitable fixed stack object.
8184 static void tryToElideArgumentCopy(
8185     FunctionLoweringInfo *FuncInfo, SmallVectorImpl<SDValue> &Chains,
8186     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
8187     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
8188     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
8189     SDValue ArgVal, bool &ArgHasUses) {
8190   // Check if this is a load from a fixed stack object.
8191   auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
8192   if (!LNode)
8193     return;
8194   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
8195   if (!FINode)
8196     return;
8197 
8198   // Check that the fixed stack object is the right size and alignment.
8199   // Look at the alignment that the user wrote on the alloca instead of looking
8200   // at the stack object.
8201   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
8202   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
8203   const AllocaInst *AI = ArgCopyIter->second.first;
8204   int FixedIndex = FINode->getIndex();
8205   int &AllocaIndex = FuncInfo->StaticAllocaMap[AI];
8206   int OldIndex = AllocaIndex;
8207   MachineFrameInfo &MFI = FuncInfo->MF->getFrameInfo();
8208   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
8209     DEBUG(dbgs() << "  argument copy elision failed due to bad fixed stack "
8210                     "object size\n");
8211     return;
8212   }
8213   unsigned RequiredAlignment = AI->getAlignment();
8214   if (!RequiredAlignment) {
8215     RequiredAlignment = FuncInfo->MF->getDataLayout().getABITypeAlignment(
8216         AI->getAllocatedType());
8217   }
8218   if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
8219     DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
8220                     "greater than stack argument alignment ("
8221                  << RequiredAlignment << " vs "
8222                  << MFI.getObjectAlignment(FixedIndex) << ")\n");
8223     return;
8224   }
8225 
8226   // Perform the elision. Delete the old stack object and replace its only use
8227   // in the variable info map. Mark the stack object as mutable.
8228   DEBUG({
8229     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
8230            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
8231            << '\n';
8232   });
8233   MFI.RemoveStackObject(OldIndex);
8234   MFI.setIsImmutableObjectIndex(FixedIndex, false);
8235   AllocaIndex = FixedIndex;
8236   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
8237   Chains.push_back(ArgVal.getValue(1));
8238 
8239   // Avoid emitting code for the store implementing the copy.
8240   const StoreInst *SI = ArgCopyIter->second.second;
8241   ElidedArgCopyInstrs.insert(SI);
8242 
8243   // Check for uses of the argument again so that we can avoid exporting ArgVal
8244   // if it isn't used by anything other than the store.
8245   for (const Value *U : Arg.users()) {
8246     if (U != SI) {
8247       ArgHasUses = true;
8248       break;
8249     }
8250   }
8251 }
8252 
8253 void SelectionDAGISel::LowerArguments(const Function &F) {
8254   SelectionDAG &DAG = SDB->DAG;
8255   SDLoc dl = SDB->getCurSDLoc();
8256   const DataLayout &DL = DAG.getDataLayout();
8257   SmallVector<ISD::InputArg, 16> Ins;
8258 
8259   if (!FuncInfo->CanLowerReturn) {
8260     // Put in an sret pointer parameter before all the other parameters.
8261     SmallVector<EVT, 1> ValueVTs;
8262     ComputeValueVTs(*TLI, DAG.getDataLayout(),
8263                     PointerType::getUnqual(F.getReturnType()), ValueVTs);
8264 
8265     // NOTE: Assuming that a pointer will never break down to more than one VT
8266     // or more than one register.
8267     ISD::ArgFlagsTy Flags;
8268     Flags.setSRet();
8269     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
8270     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
8271                          ISD::InputArg::NoArgIndex, 0);
8272     Ins.push_back(RetArg);
8273   }
8274 
8275   // Look for stores of arguments to static allocas. Mark such arguments with a
8276   // flag to ask the target to give us the memory location of that argument if
8277   // available.
8278   ArgCopyElisionMapTy ArgCopyElisionCandidates;
8279   findArgumentCopyElisionCandidates(DL, FuncInfo, ArgCopyElisionCandidates);
8280 
8281   // Set up the incoming argument description vector.
8282   for (const Argument &Arg : F.args()) {
8283     unsigned ArgNo = Arg.getArgNo();
8284     SmallVector<EVT, 4> ValueVTs;
8285     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
8286     bool isArgValueUsed = !Arg.use_empty();
8287     unsigned PartBase = 0;
8288     Type *FinalType = Arg.getType();
8289     if (Arg.hasAttribute(Attribute::ByVal))
8290       FinalType = cast<PointerType>(FinalType)->getElementType();
8291     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
8292         FinalType, F.getCallingConv(), F.isVarArg());
8293     for (unsigned Value = 0, NumValues = ValueVTs.size();
8294          Value != NumValues; ++Value) {
8295       EVT VT = ValueVTs[Value];
8296       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
8297       ISD::ArgFlagsTy Flags;
8298       unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
8299 
8300       if (Arg.hasAttribute(Attribute::ZExt))
8301         Flags.setZExt();
8302       if (Arg.hasAttribute(Attribute::SExt))
8303         Flags.setSExt();
8304       if (Arg.hasAttribute(Attribute::InReg)) {
8305         // Under the vectorcall calling convention, a structure passed InReg
8306         // must be an HVA (homogeneous vector aggregate).
8307         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
8308             isa<StructType>(Arg.getType())) {
8309           // The first value of the structure is additionally marked HvaStart.
8310           if (0 == Value)
8311             Flags.setHvaStart();
8312           Flags.setHva();
8313         }
8314         // Set InReg Flag
8315         Flags.setInReg();
8316       }
8317       if (Arg.hasAttribute(Attribute::StructRet))
8318         Flags.setSRet();
8319       if (Arg.hasAttribute(Attribute::SwiftSelf))
8320         Flags.setSwiftSelf();
8321       if (Arg.hasAttribute(Attribute::SwiftError))
8322         Flags.setSwiftError();
8323       if (Arg.hasAttribute(Attribute::ByVal))
8324         Flags.setByVal();
8325       if (Arg.hasAttribute(Attribute::InAlloca)) {
8326         Flags.setInAlloca();
8327         // Set the byval flag for CCAssignFn callbacks that don't know about
8328         // inalloca.  This way we can know how many bytes we should've allocated
8329         // and how many bytes a callee cleanup function will pop.  If we port
8330         // inalloca to more targets, we'll have to add custom inalloca handling
8331         // in the various CC lowering callbacks.
8332         Flags.setByVal();
8333       }
8334       if (F.getCallingConv() == CallingConv::X86_INTR) {
8335         // x86 interrupts pass the frame (the first parameter) by value on the stack.
8336         if (ArgNo == 0)
8337           Flags.setByVal();
8338       }
8339       if (Flags.isByVal() || Flags.isInAlloca()) {
8340         PointerType *Ty = cast<PointerType>(Arg.getType());
8341         Type *ElementTy = Ty->getElementType();
8342         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
8343         // For ByVal, the alignment should be passed from the frontend; the
8344         // backend will guess when it is missing, but some cases it cannot get right.
8345         unsigned FrameAlign;
8346         if (Arg.getParamAlignment())
8347           FrameAlign = Arg.getParamAlignment();
8348         else
8349           FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
8350         Flags.setByValAlign(FrameAlign);
8351       }
8352       if (Arg.hasAttribute(Attribute::Nest))
8353         Flags.setNest();
8354       if (NeedsRegBlock)
8355         Flags.setInConsecutiveRegs();
8356       Flags.setOrigAlign(OriginalAlignment);
8357       if (ArgCopyElisionCandidates.count(&Arg))
8358         Flags.setCopyElisionCandidate();
8359 
8360       MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
8361       unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
8362       for (unsigned i = 0; i != NumRegs; ++i) {
8363         ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
8364                               ArgNo, PartBase+i*RegisterVT.getStoreSize());
8365         if (NumRegs > 1 && i == 0)
8366           MyFlags.Flags.setSplit();
8367         // If it isn't the first piece, the alignment must be 1.
8368         else if (i > 0) {
8369           MyFlags.Flags.setOrigAlign(1);
8370           if (i == NumRegs - 1)
8371             MyFlags.Flags.setSplitEnd();
8372         }
8373         Ins.push_back(MyFlags);
8374       }
8375       if (NeedsRegBlock && Value == NumValues - 1)
8376         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
8377       PartBase += VT.getStoreSize();
8378     }
8379   }
8380 
8381   // Call the target to set up the argument values.
8382   SmallVector<SDValue, 8> InVals;
8383   SDValue NewRoot = TLI->LowerFormalArguments(
8384       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
8385 
8386   // Verify that the target's LowerFormalArguments behaved as expected.
8387   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
8388          "LowerFormalArguments didn't return a valid chain!");
8389   assert(InVals.size() == Ins.size() &&
8390          "LowerFormalArguments didn't emit the correct number of values!");
8391   DEBUG({
8392       for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
8393         assert(InVals[i].getNode() &&
8394                "LowerFormalArguments emitted a null value!");
8395         assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
8396                "LowerFormalArguments emitted a value with the wrong type!");
8397       }
8398     });
8399 
8400   // Update the DAG with the new chain value resulting from argument lowering.
8401   DAG.setRoot(NewRoot);
8402 
8403   // Set up the argument values.
8404   unsigned i = 0;
8405   if (!FuncInfo->CanLowerReturn) {
8406     // Create a virtual register for the sret pointer, and put in a copy
8407     // from the sret argument into it.
8408     SmallVector<EVT, 1> ValueVTs;
8409     ComputeValueVTs(*TLI, DAG.getDataLayout(),
8410                     PointerType::getUnqual(F.getReturnType()), ValueVTs);
8411     MVT VT = ValueVTs[0].getSimpleVT();
8412     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
8413     Optional<ISD::NodeType> AssertOp = None;
8414     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
8415                                         RegVT, VT, nullptr, AssertOp);
8416 
8417     MachineFunction& MF = SDB->DAG.getMachineFunction();
8418     MachineRegisterInfo& RegInfo = MF.getRegInfo();
8419     unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
8420     FuncInfo->DemoteRegister = SRetReg;
8421     NewRoot =
8422         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
8423     DAG.setRoot(NewRoot);
8424 
8425     // i indexes lowered arguments.  Bump it past the hidden sret argument.
8426     ++i;
8427   }
8428 
8429   SmallVector<SDValue, 4> Chains;
8430   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
8431   for (const Argument &Arg : F.args()) {
8432     SmallVector<SDValue, 4> ArgValues;
8433     SmallVector<EVT, 4> ValueVTs;
8434     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
8435     unsigned NumValues = ValueVTs.size();
8436     if (NumValues == 0)
8437       continue;
8438 
8439     bool ArgHasUses = !Arg.use_empty();
8440 
8441     // Elide the copying store if the target loaded this argument from a
8442     // suitable fixed stack object.
8443     if (Ins[i].Flags.isCopyElisionCandidate()) {
8444       tryToElideArgumentCopy(FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
8445                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
8446                              InVals[i], ArgHasUses);
8447     }
8448 
8449     // If this argument is unused, remember its value; it is used to generate
8450     // debugging information.
8451     bool isSwiftErrorArg =
8452         TLI->supportSwiftError() &&
8453         Arg.hasAttribute(Attribute::SwiftError);
8454     if (!ArgHasUses && !isSwiftErrorArg) {
8455       SDB->setUnusedArgValue(&Arg, InVals[i]);
8456 
8457       // Also remember any frame index for use in FastISel.
8458       if (FrameIndexSDNode *FI =
8459           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
8460         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8461     }
8462 
8463     for (unsigned Val = 0; Val != NumValues; ++Val) {
8464       EVT VT = ValueVTs[Val];
8465       MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
8466       unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT);
8467 
8468       // Even an apparently 'unused' swifterror argument needs to be returned. So
8469       // we do generate a copy for it that can be used on return from the
8470       // function.
8471       if (ArgHasUses || isSwiftErrorArg) {
8472         Optional<ISD::NodeType> AssertOp;
8473         if (Arg.hasAttribute(Attribute::SExt))
8474           AssertOp = ISD::AssertSext;
8475         else if (Arg.hasAttribute(Attribute::ZExt))
8476           AssertOp = ISD::AssertZext;
8477 
8478         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
8479                                              PartVT, VT, nullptr, AssertOp));
8480       }
8481 
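      // Note that i advances past this value's parts even when no copy is
      // emitted, keeping the InVals indexing in sync (checked by the assert
      // after the argument loop).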
8482       i += NumParts;
8483     }
8484 
8485     // We don't need to do anything else for unused arguments.
8486     if (ArgValues.empty())
8487       continue;
8488 
8489     // Note down frame index.
8490     if (FrameIndexSDNode *FI =
8491         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
8492       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8493 
8494     SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
8495                                      SDB->getCurSDLoc());
8496 
8497     SDB->setValue(&Arg, Res);
8498     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
8499       if (LoadSDNode *LNode =
8500           dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
8501         if (FrameIndexSDNode *FI =
8502             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
8503           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8504     }
8505 
8506     // Update the SwiftErrorVRegDefMap.
8507     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
8508       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
8509       if (TargetRegisterInfo::isVirtualRegister(Reg))
8510         FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB,
8511                                            FuncInfo->SwiftErrorArg, Reg);
8512     }
8513 
8514     // If this argument is live outside of the entry block, insert a copy from
8515     // wherever we got it to the vreg that other BB's will reference it as.
8516     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
8517       // If we can, though, try to skip creating an unnecessary vreg.
8518       // FIXME: This isn't very clean... it would be nice to make this more
8519       // general.  It's also subtly incompatible with the hacks FastISel
8520       // uses with vregs.
8521       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
8522       if (TargetRegisterInfo::isVirtualRegister(Reg)) {
8523         FuncInfo->ValueMap[&Arg] = Reg;
8524         continue;
8525       }
8526     }
8527     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
8528       FuncInfo->InitializeRegForValue(&Arg);
8529       SDB->CopyToExportRegsIfNeeded(&Arg);
8530     }
8531   }
8532 
8533   if (!Chains.empty()) {
8534     Chains.push_back(NewRoot);
8535     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
8536   }
8537 
8538   DAG.setRoot(NewRoot);
8539 
8540   assert(i == InVals.size() && "Argument register count mismatch!");
8541 
8542   // If any argument copy elisions occurred and we have debug info, update the
8543   // stale frame indices used in the dbg.declare variable info table.
8544   MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
8545   if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
8546     for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
8547       auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
8548       if (I != ArgCopyElisionFrameIndexMap.end())
8549         VI.Slot = I->second;
8550     }
8551   }
8552 
8553   // Finally, if the target has anything special to do, allow it to do so.
8554   EmitFunctionEntryCode();
8555 }
8556 
8557 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
8558 /// ensure constants are generated when needed.  Remember the virtual registers
8559 /// that need to be added to the Machine PHI nodes as input.  We cannot just
8560 /// directly add them, because expansion might result in multiple MBB's for one
8561 /// BB.  As such, the start of the BB might correspond to a different MBB than
8562 /// the end.
8563 ///
8564 void
8565 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
8566   const TerminatorInst *TI = LLVMBB->getTerminator();
8567 
8568   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
8569 
8570   // Check PHI nodes in successors that expect a value to be available from this
8571   // block.
8572   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
8573     const BasicBlock *SuccBB = TI->getSuccessor(succ);
8574     if (!isa<PHINode>(SuccBB->begin())) continue;
8575     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
8576 
8577     // If this terminator has multiple identical successors (common for
8578     // switches), only handle each succ once.
8579     if (!SuccsHandled.insert(SuccMBB).second)
8580       continue;
8581 
8582     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
8583 
8584     // At this point we know that there is a 1-1 correspondence between LLVM PHI
8585     // nodes and Machine PHI nodes, but the incoming operands have not been
8586     // emitted yet.
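    // PHI nodes are always grouped at the start of a basic block, so the
    // dyn_cast in the loop condition stops at the first non-PHI instruction.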
8587     for (BasicBlock::const_iterator I = SuccBB->begin();
8588          const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
8589       // Ignore dead phi's.
8590       if (PN->use_empty()) continue;
8591 
8592       // Skip empty types
8593       if (PN->getType()->isEmptyTy())
8594         continue;
8595 
8596       unsigned Reg;
8597       const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
8598 
8599       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
8600         unsigned &RegOut = ConstantsOut[C];
8601         if (RegOut == 0) {
8602           RegOut = FuncInfo.CreateRegs(C->getType());
8603           CopyValueToVirtualRegister(C, RegOut);
8604         }
8605         Reg = RegOut;
8606       } else {
8607         DenseMap<const Value *, unsigned>::iterator I =
8608           FuncInfo.ValueMap.find(PHIOp);
8609         if (I != FuncInfo.ValueMap.end())
8610           Reg = I->second;
8611         else {
8612           assert(isa<AllocaInst>(PHIOp) &&
8613                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
8614                  "Didn't codegen value into a register!??");
8615           Reg = FuncInfo.CreateRegs(PHIOp->getType());
8616           CopyValueToVirtualRegister(PHIOp, Reg);
8617         }
8618       }
8619 
8620       // Remember that this register needs to be added to the machine PHI node
8621       // as the input for this MBB.
8622       SmallVector<EVT, 4> ValueVTs;
8623       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8624       ComputeValueVTs(TLI, DAG.getDataLayout(), PN->getType(), ValueVTs);
8625       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
8626         EVT VT = ValueVTs[vti];
8627         unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
8628         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
8629           FuncInfo.PHINodesToUpdate.push_back(
8630               std::make_pair(&*MBBI++, Reg + i));
8631         Reg += NumRegisters;
8632       }
8633     }
8634   }
8635 
8636   ConstantsOut.clear();
8637 }
8638 
8639 /// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
8640 /// if SuccMBB is null.
8641 MachineBasicBlock *
8642 SelectionDAGBuilder::StackProtectorDescriptor::
8643 AddSuccessorMBB(const BasicBlock *BB,
8644                 MachineBasicBlock *ParentMBB,
8645                 bool IsLikely,
8646                 MachineBasicBlock *SuccMBB) {
8647   // If SuccBB has not been created yet, create it.
8648   if (!SuccMBB) {
8649     MachineFunction *MF = ParentMBB->getParent();
8650     MachineFunction::iterator BBI(ParentMBB);
8651     SuccMBB = MF->CreateMachineBasicBlock(BB);
8652     MF->insert(++BBI, SuccMBB);
8653   }
8654   // Add it as a successor of ParentMBB.
8655   ParentMBB->addSuccessor(
8656       SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
8657   return SuccMBB;
8658 }
8659 
8660 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
8661   MachineFunction::iterator I(MBB);
8662   if (++I == FuncInfo.MF->end())
8663     return nullptr;
8664   return &*I;
8665 }
8666 
8667 /// During lowering new call nodes can be created (such as memset, etc.).
8668 /// Those will become new roots of the current DAG, but complications arise
8669 /// when they are tail calls. In such cases, the call lowering will update
8670 /// the root, but the builder still needs to know that a tail call has been
8671 /// lowered in order to avoid generating an additional return.
8672 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
8673   // A null node indicates that the call was lowered as a tail call.
8674   if (MaybeTC.getNode() != nullptr)
8675     DAG.setRoot(MaybeTC);
8676   else
8677     HasTailCall = true;
8678 }
8679 
8680 uint64_t
8681 SelectionDAGBuilder::getJumpTableRange(const CaseClusterVector &Clusters,
8682                                        unsigned First, unsigned Last) const {
8683   assert(Last >= First);
8684   const APInt &LowCase = Clusters[First].Low->getValue();
8685   const APInt &HighCase = Clusters[Last].High->getValue();
8686   assert(LowCase.getBitWidth() == HighCase.getBitWidth());
8687 
8688   // FIXME: A range of consecutive cases has 100% density, but only requires one
8689   // comparison to lower. We should discriminate against such consecutive ranges
8690   // in jump tables.
8691 
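  // Note: the result is clamped to roughly UINT64_MAX / 100 so that the
  // density checks in isSuitableForJumpTable can scale it by a percentage
  // without overflowing uint64_t.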
8692   return (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100) + 1;
8693 }
8694 
8695 uint64_t SelectionDAGBuilder::getJumpTableNumCases(
8696     const SmallVectorImpl<unsigned> &TotalCases, unsigned First,
8697     unsigned Last) const {
8698   assert(Last >= First);
8699   assert(TotalCases[Last] >= TotalCases[First]);
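  // TotalCases is a prefix-sum array, so the number of cases covered by
  // Clusters[First..Last] is the difference of two prefix sums.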
8700   uint64_t NumCases =
8701       TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
8702   return NumCases;
8703 }
8704 
8705 bool SelectionDAGBuilder::buildJumpTable(const CaseClusterVector &Clusters,
8706                                          unsigned First, unsigned Last,
8707                                          const SwitchInst *SI,
8708                                          MachineBasicBlock *DefaultMBB,
8709                                          CaseCluster &JTCluster) {
8710   assert(First <= Last);
8711 
8712   auto Prob = BranchProbability::getZero();
8713   unsigned NumCmps = 0;
8714   std::vector<MachineBasicBlock*> Table;
8715   DenseMap<MachineBasicBlock*, BranchProbability> JTProbs;
8716 
8717   // Initialize probabilities in JTProbs.
8718   for (unsigned I = First; I <= Last; ++I)
8719     JTProbs[Clusters[I].MBB] = BranchProbability::getZero();
8720 
8721   for (unsigned I = First; I <= Last; ++I) {
8722     assert(Clusters[I].Kind == CC_Range);
8723     Prob += Clusters[I].Prob;
8724     const APInt &Low = Clusters[I].Low->getValue();
8725     const APInt &High = Clusters[I].High->getValue();
8726     NumCmps += (Low == High) ? 1 : 2;
8727     if (I != First) {
8728       // Fill the gap between this and the previous cluster.
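      // e.g., clusters [0,1] and [4,4] leave a gap of two table entries
      // (for the values 2 and 3) that are routed to DefaultMBB.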
8729       const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
8730       assert(PreviousHigh.slt(Low));
8731       uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
8732       for (uint64_t J = 0; J < Gap; J++)
8733         Table.push_back(DefaultMBB);
8734     }
8735     uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
8736     for (uint64_t J = 0; J < ClusterSize; ++J)
8737       Table.push_back(Clusters[I].MBB);
8738     JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
8739   }
8740 
8741   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8742   unsigned NumDests = JTProbs.size();
8743   if (TLI.isSuitableForBitTests(
8744           NumDests, NumCmps, Clusters[First].Low->getValue(),
8745           Clusters[Last].High->getValue(), DAG.getDataLayout())) {
8746     // Clusters[First..Last] should be lowered as bit tests instead.
8747     return false;
8748   }
8749 
8750   // Create the MBB that will load from and jump through the table.
8751   // Note: We create it here, but it's not inserted into the function yet.
8752   MachineFunction *CurMF = FuncInfo.MF;
8753   MachineBasicBlock *JumpTableMBB =
8754       CurMF->CreateMachineBasicBlock(SI->getParent());
8755 
8756   // Add successors. Note: use table order for determinism.
8757   SmallPtrSet<MachineBasicBlock *, 8> Done;
8758   for (MachineBasicBlock *Succ : Table) {
8759     if (Done.count(Succ))
8760       continue;
8761     addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
8762     Done.insert(Succ);
8763   }
8764   JumpTableMBB->normalizeSuccProbs();
8765 
8766   unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI.getJumpTableEncoding())
8767                      ->createJumpTableIndex(Table);
8768 
8769   // Set up the jump table info.
8770   JumpTable JT(-1U, JTI, JumpTableMBB, nullptr);
8771   JumpTableHeader JTH(Clusters[First].Low->getValue(),
8772                       Clusters[Last].High->getValue(), SI->getCondition(),
8773                       nullptr, false);
8774   JTCases.emplace_back(std::move(JTH), std::move(JT));
8775 
8776   JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
8777                                      JTCases.size() - 1, Prob);
8778   return true;
8779 }
8780 
8781 void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
8782                                          const SwitchInst *SI,
8783                                          MachineBasicBlock *DefaultMBB) {
8784 #ifndef NDEBUG
8785   // Clusters must be non-empty, sorted, and only contain Range clusters.
8786   assert(!Clusters.empty());
8787   for (CaseCluster &C : Clusters)
8788     assert(C.Kind == CC_Range);
8789   for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
8790     assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
8791 #endif
8792 
8793   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8794   if (!TLI.areJTsAllowed(SI->getParent()->getParent()))
8795     return;
8796 
8797   const int64_t N = Clusters.size();
8798   const unsigned MinJumpTableEntries = TLI.getMinimumJumpTableEntries();
8799   const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;
8800 
8801   if (N < 2 || N < MinJumpTableEntries)
8802     return;
8803 
8804   // TotalCases[i]: Total number of cases in Clusters[0..i].
8805   SmallVector<unsigned, 8> TotalCases(N);
8806   for (unsigned i = 0; i < N; ++i) {
8807     const APInt &Hi = Clusters[i].High->getValue();
8808     const APInt &Lo = Clusters[i].Low->getValue();
8809     TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
8810     if (i != 0)
8811       TotalCases[i] += TotalCases[i - 1];
8812   }
8813 
8814   // Cheap case: the whole range may be suitable for a jump table.
8815   uint64_t Range = getJumpTableRange(Clusters, 0, N - 1);
8816   uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
8817   assert(NumCases < UINT64_MAX / 100);
8818   assert(Range >= NumCases);
8819   if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) {
8820     CaseCluster JTCluster;
8821     if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
8822       Clusters[0] = JTCluster;
8823       Clusters.resize(1);
8824       return;
8825     }
8826   }
8827 
8828   // The algorithm below is not suitable for -O0.
8829   if (TM.getOptLevel() == CodeGenOpt::None)
8830     return;
8831 
8832   // Split Clusters into minimum number of dense partitions. The algorithm uses
8833   // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
8834   // for the Case Statement'" (1994), but builds the MinPartitions array in
8835   // reverse order to make it easier to reconstruct the partitions in ascending
8836   // order. In the choice between two optimal partitionings, it picks the one
8837   // which yields more jump tables.
8838 
8839   // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
8840   SmallVector<unsigned, 8> MinPartitions(N);
8841   // LastElement[i] is the last element of the partition starting at i.
8842   SmallVector<unsigned, 8> LastElement(N);
8843   // PartitionsScore[i] is used to break ties when choosing between two
8844   // partitionings resulting in the same number of partitions.
8845   SmallVector<unsigned, 8> PartitionsScore(N);
8846   // For PartitionsScore, a small number of comparisons is considered as good as
8847   // a jump table and a single comparison is considered better than a jump
8848   // table.
8849   enum PartitionScores : unsigned {
8850     NoTable = 0,
8851     Table = 1,
8852     FewCases = 1,
8853     SingleCase = 2
8854   };
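  // Illustrative example: with clusters {0} {1} {2} {100}, the baseline puts
  // each cluster in a partition of its own, but the search below may find
  // that {0,1,2} alone is dense enough for a jump table, giving two
  // partitions with a better score.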
8855 
8856   // Base case: There is only one way to partition Clusters[N-1].
8857   MinPartitions[N - 1] = 1;
8858   LastElement[N - 1] = N - 1;
8859   PartitionsScore[N - 1] = PartitionScores::SingleCase;
8860 
8861   // Note: loop indexes are signed to avoid underflow.
8862   for (int64_t i = N - 2; i >= 0; i--) {
8863     // Find optimal partitioning of Clusters[i..N-1].
8864     // Baseline: Put Clusters[i] into a partition on its own.
8865     MinPartitions[i] = MinPartitions[i + 1] + 1;
8866     LastElement[i] = i;
8867     PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;
8868 
8869     // Search for a solution that results in fewer partitions.
8870     for (int64_t j = N - 1; j > i; j--) {
8871       // Try building a partition from Clusters[i..j].
8872       uint64_t Range = getJumpTableRange(Clusters, i, j);
8873       uint64_t NumCases = getJumpTableNumCases(TotalCases, i, j);
8874       assert(NumCases < UINT64_MAX / 100);
8875       assert(Range >= NumCases);
8876       if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) {
8877         unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
8878         unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
8879         int64_t NumEntries = j - i + 1;
8880 
8881         if (NumEntries == 1)
8882           Score += PartitionScores::SingleCase;
8883         else if (NumEntries <= SmallNumberOfEntries)
8884           Score += PartitionScores::FewCases;
8885         else if (NumEntries >= MinJumpTableEntries)
8886           Score += PartitionScores::Table;
8887 
8888         // If this leads to fewer partitions, or to the same number of
8889         // partitions with better score, it is a better partitioning.
8890         if (NumPartitions < MinPartitions[i] ||
8891             (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
8892           MinPartitions[i] = NumPartitions;
8893           LastElement[i] = j;
8894           PartitionsScore[i] = Score;
8895         }
8896       }
8897     }
8898   }
8899 
8900   // Iterate over the partitions, replacing some with jump tables in-place.
8901   unsigned DstIndex = 0;
8902   for (unsigned First = 0, Last; First < N; First = Last + 1) {
8903     Last = LastElement[First];
8904     assert(Last >= First);
8905     assert(DstIndex <= First);
8906     unsigned NumClusters = Last - First + 1;
8907 
8908     CaseCluster JTCluster;
8909     if (NumClusters >= MinJumpTableEntries &&
8910         buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
8911       Clusters[DstIndex++] = JTCluster;
8912     } else {
8913       for (unsigned I = First; I <= Last; ++I)
8914         std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
8915     }
8916   }
8917   Clusters.resize(DstIndex);
8918 }
8919 
8920 bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
8921                                         unsigned First, unsigned Last,
8922                                         const SwitchInst *SI,
8923                                         CaseCluster &BTCluster) {
8924   assert(First <= Last);
8925   if (First == Last)
8926     return false;
8927 
8928   BitVector Dests(FuncInfo.MF->getNumBlockIDs());
8929   unsigned NumCmps = 0;
8930   for (int64_t I = First; I <= Last; ++I) {
8931     assert(Clusters[I].Kind == CC_Range);
8932     Dests.set(Clusters[I].MBB->getNumber());
8933     NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
8934   }
8935   unsigned NumDests = Dests.count();
8936 
8937   APInt Low = Clusters[First].Low->getValue();
8938   APInt High = Clusters[Last].High->getValue();
8939   assert(Low.slt(High));
8940 
8941   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8942   const DataLayout &DL = DAG.getDataLayout();
8943   if (!TLI.isSuitableForBitTests(NumDests, NumCmps, Low, High, DL))
8944     return false;
8945 
8946   APInt LowBound;
8947   APInt CmpRange;
8948 
8949   const int BitWidth = TLI.getPointerTy(DL).getSizeInBits();
8950   assert(TLI.rangeFitsInWord(Low, High, DL) &&
8951          "Case range must fit in bit mask!");
8952 
8953   // Check if the clusters cover a contiguous range such that no value in the
8954   // range will jump to the default statement.
8955   bool ContiguousRange = true;
8956   for (int64_t I = First + 1; I <= Last; ++I) {
8957     if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
8958       ContiguousRange = false;
8959       break;
8960     }
8961   }
8962 
8963   if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
8964     // Optimize the case where all the case values fit in a word without
8965     // having to subtract minValue: the subtraction can be omitted entirely.
8966     LowBound = APInt::getNullValue(Low.getBitWidth());
8967     CmpRange = High;
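         // With LowBound == 0, values below Low also pass the range check but
         // match no case, so the default remains reachable and the range is no
         // longer contiguous.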
8968     ContiguousRange = false;
8969   } else {
8970     LowBound = Low;
8971     CmpRange = High - Low;
8972   }
8973 
8974   CaseBitsVector CBV;
8975   auto TotalProb = BranchProbability::getZero();
8976   for (unsigned i = First; i <= Last; ++i) {
8977     // Find the CaseBits for this destination.
8978     unsigned j;
8979     for (j = 0; j < CBV.size(); ++j)
8980       if (CBV[j].BB == Clusters[i].MBB)
8981         break;
8982     if (j == CBV.size())
8983       CBV.push_back(
8984           CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
8985     CaseBits *CB = &CBV[j];
8986 
8987     // Update Mask, Bits and ExtraProb.
8988     uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
8989     uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
8990     assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
8991     CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
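         // For example, with LowBound == 4 a cluster covering [6, 8] gives
         // Lo == 2 and Hi == 4: -1ULL >> (63 - 2) is 0b111, and shifting it
         // left by 2 sets mask bits 2..4.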
8992     CB->Bits += Hi - Lo + 1;
8993     CB->ExtraProb += Clusters[i].Prob;
8994     TotalProb += Clusters[i].Prob;
8995   }
8996 
8997   BitTestInfo BTI;
8998   std::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) {
8999     // Sort by probability first, number of bits second.
9000     if (a.ExtraProb != b.ExtraProb)
9001       return a.ExtraProb > b.ExtraProb;
9002     return a.Bits > b.Bits;
9003   });
9004 
9005   for (auto &CB : CBV) {
9006     MachineBasicBlock *BitTestBB =
9007         FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
9008     BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
9009   }
9010   BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
9011                             SI->getCondition(), -1U, MVT::Other, false,
9012                             ContiguousRange, nullptr, nullptr, std::move(BTI),
9013                             TotalProb);
9014 
9015   BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
9016                                     BitTestCases.size() - 1, TotalProb);
9017   return true;
9018 }
9019 
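     // For intuition, the branch each bit-test block encodes once lowered is
     // equivalent to the sketch below. The helper is illustrative only (a
     // hypothetical name, called by nothing in this file); it mirrors the
     // LowBound, CmpRange and Mask values computed in buildBitTests above.
     LLVM_ATTRIBUTE_UNUSED
     static bool wouldTakeBitTest(uint64_t Value, uint64_t LowBound,
                                  uint64_t CmpRange, uint64_t Mask) {
       // Rebase the switch condition and range-check it; out-of-range values
       // fall through to the default destination.
       uint64_t Shifted = Value - LowBound;
       if (Shifted > CmpRange)
         return false;
       // One shift-and-mask test replaces up to BitWidth separate compares.
       return ((uint64_t(1) << Shifted) & Mask) != 0;
     }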
9020 void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
9021                                               const SwitchInst *SI) {
9022 // Partition Clusters into as few subsets as possible, where each subset has a
9023 // range that fits in a machine word and has <= 3 unique destinations.
9024 
9025 #ifndef NDEBUG
9026   // Clusters must be sorted and contain Range or JumpTable clusters.
9027   assert(!Clusters.empty());
9028   assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
9029   for (const CaseCluster &C : Clusters)
9030     assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
9031   for (unsigned i = 1; i < Clusters.size(); ++i)
9032     assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
9033 #endif
9034 
9035   // The algorithm below is not suitable for -O0.
9036   if (TM.getOptLevel() == CodeGenOpt::None)
9037     return;
9038 
9039   // If the target does not have a legal shift left, do not emit bit tests at all.
9040   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9041   const DataLayout &DL = DAG.getDataLayout();
9042 
9043   EVT PTy = TLI.getPointerTy(DL);
9044   if (!TLI.isOperationLegal(ISD::SHL, PTy))
9045     return;
9046 
9047   int BitWidth = PTy.getSizeInBits();
9048   const int64_t N = Clusters.size();
9049 
9050   // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
9051   SmallVector<unsigned, 8> MinPartitions(N);
9052   // LastElement[i] is the last element of the partition starting at i.
9053   SmallVector<unsigned, 8> LastElement(N);
9054 
9055   // FIXME: This might not be the best algorithm for finding bit test clusters.
9056 
9057   // Base case: There is only one way to partition Clusters[N-1].
9058   MinPartitions[N - 1] = 1;
9059   LastElement[N - 1] = N - 1;
9060 
9061   // Note: loop indexes are signed to avoid underflow.
9062   for (int64_t i = N - 2; i >= 0; --i) {
9063     // Find optimal partitioning of Clusters[i..N-1].
9064     // Baseline: Put Clusters[i] into a partition on its own.
9065     MinPartitions[i] = MinPartitions[i + 1] + 1;
9066     LastElement[i] = i;
9067 
9068     // Search for a solution that results in fewer partitions.
9069     // Note: the search is limited by BitWidth, reducing time complexity.
9070     for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
9071       // Try building a partition from Clusters[i..j].
9072 
9073       // Check the range.
9074       if (!TLI.rangeFitsInWord(Clusters[i].Low->getValue(),
9075                                Clusters[j].High->getValue(), DL))
9076         continue;
9077 
9078       // Check the number of destinations and cluster types.
9079       // FIXME: This works, but doesn't seem very efficient.
9080       bool RangesOnly = true;
9081       BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9082       for (int64_t k = i; k <= j; k++) {
9083         if (Clusters[k].Kind != CC_Range) {
9084           RangesOnly = false;
9085           break;
9086         }
9087         Dests.set(Clusters[k].MBB->getNumber());
9088       }
9089       if (!RangesOnly || Dests.count() > 3)
9090         break;
9091 
9092       // Check if it's a better partition.
9093       unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9094       if (NumPartitions < MinPartitions[i]) {
9095         // Found a better partition.
9096         MinPartitions[i] = NumPartitions;
9097         LastElement[i] = j;
9098       }
9099     }
9100   }
9101 
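       // For example, on a 64-bit target, range clusters with case values
       // {0, 3, 9} and at most three distinct destinations span only ten
       // values, so rangeFitsInWord succeeds and the recurrence can fold all
       // three clusters into one bit-test partition.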
9102   // Iterate over the partitions, replacing with bit-test clusters in-place.
9103   unsigned DstIndex = 0;
9104   for (unsigned First = 0, Last; First < N; First = Last + 1) {
9105     Last = LastElement[First];
9106     assert(First <= Last);
9107     assert(DstIndex <= First);
9108 
9109     CaseCluster BitTestCluster;
9110     if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
9111       Clusters[DstIndex++] = BitTestCluster;
9112     } else {
9113       size_t NumClusters = Last - First + 1;
9114       std::memmove(&Clusters[DstIndex], &Clusters[First],
9115                    sizeof(Clusters[0]) * NumClusters);
9116       DstIndex += NumClusters;
9117     }
9118   }
9119   Clusters.resize(DstIndex);
9120 }
9121 
9122 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
9123                                         MachineBasicBlock *SwitchMBB,
9124                                         MachineBasicBlock *DefaultMBB) {
9125   MachineFunction *CurMF = FuncInfo.MF;
9126   MachineBasicBlock *NextMBB = nullptr;
9127   MachineFunction::iterator BBI(W.MBB);
9128   if (++BBI != FuncInfo.MF->end())
9129     NextMBB = &*BBI;
9130 
9131   unsigned Size = W.LastCluster - W.FirstCluster + 1;
9132 
9133   BranchProbabilityInfo *BPI = FuncInfo.BPI;
9134 
9135   if (Size == 2 && W.MBB == SwitchMBB) {
9136     // If any two of the cases have the same destination, and if one value
9137     // is the same as the other, but has one bit unset that the other has set,
9138     // use bit manipulation to do two compares at once.  For example:
9139     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
9140     // TODO: This could be extended to merge any 2 cases in switches with 3
9141     // cases.
9142     // TODO: Handle cases where W.MBB != SwitchMBB.
9143     CaseCluster &Small = *W.FirstCluster;
9144     CaseCluster &Big = *W.LastCluster;
9145 
9146     if (Small.Low == Small.High && Big.Low == Big.High &&
9147         Small.MBB == Big.MBB) {
9148       const APInt &SmallValue = Small.Low->getValue();
9149       const APInt &BigValue = Big.Low->getValue();
9150 
9151       // Check that the two values differ in exactly one bit.
9152       APInt CommonBit = BigValue ^ SmallValue;
9153       if (CommonBit.isPowerOf2()) {
9154         SDValue CondLHS = getValue(Cond);
9155         EVT VT = CondLHS.getValueType();
9156         SDLoc DL = getCurSDLoc();
9157 
9158         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
9159                                  DAG.getConstant(CommonBit, DL, VT));
9160         SDValue Cond = DAG.getSetCC(
9161             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
9162             ISD::SETEQ);
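             // For the example above: CommonBit == 6 ^ 4 == 2, Or == (X | 2),
             // and the test becomes (X | 2) == (6 | 4), i.e. (X | 2) == 6.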
9163 
9164         // Update successor info.
9165         // Both Small and Big will jump to Small.BB, so we sum up the
9166         // probabilities.
9167         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
9168         if (BPI)
9169           addSuccessorWithProb(
9170               SwitchMBB, DefaultMBB,
9171               // The default destination is the first successor in IR.
9172               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
9173         else
9174           addSuccessorWithProb(SwitchMBB, DefaultMBB);
9175 
9176         // Insert the true branch.
9177         SDValue BrCond =
9178             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
9179                         DAG.getBasicBlock(Small.MBB));
9180         // Insert the false branch.
9181         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
9182                              DAG.getBasicBlock(DefaultMBB));
9183 
9184         DAG.setRoot(BrCond);
9185         return;
9186       }
9187     }
9188   }
9189 
9190   if (TM.getOptLevel() != CodeGenOpt::None) {
9191     // Order cases by probability so the most likely case will be checked first.
9192     std::sort(W.FirstCluster, W.LastCluster + 1,
9193               [](const CaseCluster &a, const CaseCluster &b) {
9194       return a.Prob > b.Prob;
9195     });
9196 
9197     // Rearrange the case blocks so that the last one falls through if
9198     // possible, without changing the order of probabilities.
9199     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
9200       --I;
9201       if (I->Prob > W.LastCluster->Prob)
9202         break;
9203       if (I->Kind == CC_Range && I->MBB == NextMBB) {
9204         std::swap(*I, *W.LastCluster);
9205         break;
9206       }
9207     }
9208   }
9209 
9210   // Compute total probability.
9211   BranchProbability DefaultProb = W.DefaultProb;
9212   BranchProbability UnhandledProbs = DefaultProb;
9213   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
9214     UnhandledProbs += I->Prob;
9215 
9216   MachineBasicBlock *CurMBB = W.MBB;
9217   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
9218     MachineBasicBlock *Fallthrough;
9219     if (I == W.LastCluster) {
9220       // For the last cluster, fall through to the default destination.
9221       Fallthrough = DefaultMBB;
9222     } else {
9223       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
9224       CurMF->insert(BBI, Fallthrough);
9225       // Put Cond in a virtual register to make it available from the new blocks.
9226       ExportFromCurrentBlock(Cond);
9227     }
9228     UnhandledProbs -= I->Prob;
9229 
9230     switch (I->Kind) {
9231       case CC_JumpTable: {
9232         // FIXME: Optimize away range check based on pivot comparisons.
9233         JumpTableHeader *JTH = &JTCases[I->JTCasesIndex].first;
9234         JumpTable *JT = &JTCases[I->JTCasesIndex].second;
9235 
9236         // The jump block hasn't been inserted yet; insert it here.
9237         MachineBasicBlock *JumpMBB = JT->MBB;
9238         CurMF->insert(BBI, JumpMBB);
9239 
9240         auto JumpProb = I->Prob;
9241         auto FallthroughProb = UnhandledProbs;
9242 
9243         // If the default statement is a target of the jump table, we evenly
9244         // distribute the default probability to successors of CurMBB. Also
9245         // update the probability on the edge from JumpMBB to Fallthrough.
9246         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
9247                                               SE = JumpMBB->succ_end();
9248              SI != SE; ++SI) {
9249           if (*SI == DefaultMBB) {
9250             JumpProb += DefaultProb / 2;
9251             FallthroughProb -= DefaultProb / 2;
9252             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
9253             JumpMBB->normalizeSuccProbs();
9254             break;
9255           }
9256         }
9257 
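             // For example, if DefaultProb is 1/4 and DefaultMBB is reachable
             // from the jump table, 1/8 is re-routed through JumpMBB and the
             // fallthrough edge keeps the remaining 1/8.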
9258         addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
9259         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
9260         CurMBB->normalizeSuccProbs();
9261 
9262         // The jump table header will be inserted in our current block, do the
9263         // range check, and fall through to our fallthrough block.
9264         JTH->HeaderBB = CurMBB;
9265         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
9266 
9267         // If we're in the right place, emit the jump table header right now.
9268         if (CurMBB == SwitchMBB) {
9269           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
9270           JTH->Emitted = true;
9271         }
9272         break;
9273       }
9274       case CC_BitTests: {
9275         // FIXME: Optimize away range check based on pivot comparisons.
9276         BitTestBlock *BTB = &BitTestCases[I->BTCasesIndex];
9277 
9278         // The bit test blocks haven't been inserted yet; insert them here.
9279         for (BitTestCase &BTC : BTB->Cases)
9280           CurMF->insert(BBI, BTC.ThisBB);
9281 
9282         // Fill in fields of the BitTestBlock.
9283         BTB->Parent = CurMBB;
9284         BTB->Default = Fallthrough;
9285 
9286         BTB->DefaultProb = UnhandledProbs;
9287         // If the cases in the bit test don't form a contiguous range, we
9288         // evenly distribute the probability on the edge to Fallthrough
9289         // between the two successors of CurMBB.
9290         if (!BTB->ContiguousRange) {
9291           BTB->Prob += DefaultProb / 2;
9292           BTB->DefaultProb -= DefaultProb / 2;
9293         }
9294 
9295         // If we're in the right place, emit the bit test header right now.
9296         if (CurMBB == SwitchMBB) {
9297           visitBitTestHeader(*BTB, SwitchMBB);
9298           BTB->Emitted = true;
9299         }
9300         break;
9301       }
9302       case CC_Range: {
9303         const Value *RHS, *LHS, *MHS;
9304         ISD::CondCode CC;
9305         if (I->Low == I->High) {
9306           // Check Cond == I->Low.
9307           CC = ISD::SETEQ;
9308           LHS = Cond;
9309           RHS = I->Low;
9310           MHS = nullptr;
9311         } else {
9312           // Check I->Low <= Cond <= I->High.
9313           CC = ISD::SETLE;
9314           LHS = I->Low;
9315           MHS = Cond;
9316           RHS = I->High;
9317         }
9318 
9319         // The false probability is the sum of all unhandled cases.
9320         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB, I->Prob,
9321                      UnhandledProbs);
9322 
9323         if (CurMBB == SwitchMBB)
9324           visitSwitchCase(CB, SwitchMBB);
9325         else
9326           SwitchCases.push_back(CB);
9327 
9328         break;
9329       }
9330     }
9331     CurMBB = Fallthrough;
9332   }
9333 }
9334 
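     /// Return the number of clusters in [First, Last] that would be ordered
     /// before CC when sorting by decreasing probability, breaking ties by
     /// smaller case value. For example, among clusters with probabilities
     /// {0.5, 0.3, 0.2}, the 0.3 cluster has rank 1.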
9335 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
9336                                               CaseClusterIt First,
9337                                               CaseClusterIt Last) {
9338   return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
9339     if (X.Prob != CC.Prob)
9340       return X.Prob > CC.Prob;
9341 
9342     // Ties are broken by comparing the case value.
9343     return X.Low->getValue().slt(CC.Low->getValue());
9344   });
9345 }
9346 
9347 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
9348                                         const SwitchWorkListItem &W,
9349                                         Value *Cond,
9350                                         MachineBasicBlock *SwitchMBB) {
9351   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
9352          "Clusters not sorted?");
9353 
9354   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
9355 
9356   // Balance the tree based on branch probabilities to create a near-optimal (in
9357   // terms of search time given key frequency) binary search tree. See e.g. Kurt
9358   // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
9359   CaseClusterIt LastLeft = W.FirstCluster;
9360   CaseClusterIt FirstRight = W.LastCluster;
9361   auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
9362   auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
9363 
9364   // Move LastLeft and FirstRight towards each other from opposite directions to
9365   // find a partitioning of the clusters which balances the probability on both
9366   // sides. If LeftProb and RightProb are equal, alternate which side is
9367   // taken to ensure 0-probability nodes are distributed evenly.
9368   unsigned I = 0;
9369   while (LastLeft + 1 < FirstRight) {
9370     if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
9371       LeftProb += (++LastLeft)->Prob;
9372     else
9373       RightProb += (--FirstRight)->Prob;
9374     I++;
9375   }
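       // For example, with cluster probabilities {4, 1, 1, 4} and a zero
       // default probability, the loop alternates sides and stops with two
       // clusters on each side and LeftProb == RightProb == 5.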
9376 
9377   for (;;) {
9378     // Our binary search tree differs from a typical BST in that ours can have up
9379     // to three values in each leaf. The pivot selection above doesn't take that
9380     // into account, which means the tree might require more nodes and be less
9381     // efficient. We compensate for this here.
9382 
9383     unsigned NumLeft = LastLeft - W.FirstCluster + 1;
9384     unsigned NumRight = W.LastCluster - FirstRight + 1;
9385 
9386     if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
9387       // If one side has fewer than 3 clusters, and the other has more than 3,
9388       // consider taking a cluster from the other side.
9389 
9390       if (NumLeft < NumRight) {
9391         // Consider moving the first cluster on the right to the left side.
9392         CaseCluster &CC = *FirstRight;
9393         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
9394         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
9395         if (LeftSideRank <= RightSideRank) {
9396           // Moving the cluster to the left does not demote it.
9397           ++LastLeft;
9398           ++FirstRight;
9399           continue;
9400         }
9401       } else {
9402         assert(NumRight < NumLeft);
9403         // Consider moving the last element on the left to the right side.
9404         CaseCluster &CC = *LastLeft;
9405         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
9406         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
9407         if (RightSideRank <= LeftSideRank) {
9408           // Moving the cluster to the right does not demote it.
9409           --LastLeft;
9410           --FirstRight;
9411           continue;
9412         }
9413       }
9414     }
9415     break;
9416   }
9417 
9418   assert(LastLeft + 1 == FirstRight);
9419   assert(LastLeft >= W.FirstCluster);
9420   assert(FirstRight <= W.LastCluster);
9421 
9422   // Use the first element on the right as pivot since we will make less-than
9423   // comparisons against it.
9424   CaseClusterIt PivotCluster = FirstRight;
9425   assert(PivotCluster > W.FirstCluster);
9426   assert(PivotCluster <= W.LastCluster);
9427 
9428   CaseClusterIt FirstLeft = W.FirstCluster;
9429   CaseClusterIt LastRight = W.LastCluster;
9430 
9431   const ConstantInt *Pivot = PivotCluster->Low;
9432 
9433   // New blocks will be inserted immediately after the current one.
9434   MachineFunction::iterator BBI(W.MBB);
9435   ++BBI;
9436 
9437   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
9438   // we can branch to its destination directly if it's squeezed exactly in
9439   // between the known lower bound and Pivot - 1.
9440   MachineBasicBlock *LeftMBB;
9441   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
9442       FirstLeft->Low == W.GE &&
9443       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
9444     LeftMBB = FirstLeft->MBB;
9445   } else {
9446     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
9447     FuncInfo.MF->insert(BBI, LeftMBB);
9448     WorkList.push_back(
9449         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
9450     // Put Cond in a virtual register to make it available from the new blocks.
9451     ExportFromCurrentBlock(Cond);
9452   }
9453 
9454   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
9455   // single cluster, RHS.Low == Pivot, and we can branch to its destination
9456   // directly if RHS.High equals the current upper bound.
9457   MachineBasicBlock *RightMBB;
9458   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
9459       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
9460     RightMBB = FirstRight->MBB;
9461   } else {
9462     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
9463     FuncInfo.MF->insert(BBI, RightMBB);
9464     WorkList.push_back(
9465         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
9466     // Put Cond in a virtual register to make it available from the new blocks.
9467     ExportFromCurrentBlock(Cond);
9468   }
9469 
9470   // Create the CaseBlock record that will be used to lower the branch.
9471   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
9472                LeftProb, RightProb);
9473 
9474   if (W.MBB == SwitchMBB)
9475     visitSwitchCase(CB, SwitchMBB);
9476   else
9477     SwitchCases.push_back(CB);
9478 }
9479 
9480 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
9481   // Extract cases from the switch.
9482   BranchProbabilityInfo *BPI = FuncInfo.BPI;
9483   CaseClusterVector Clusters;
9484   Clusters.reserve(SI.getNumCases());
9485   for (auto I : SI.cases()) {
9486     MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
9487     const ConstantInt *CaseVal = I.getCaseValue();
9488     BranchProbability Prob =
9489         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
9490             : BranchProbability(1, SI.getNumCases() + 1);
9491     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
9492   }
9493 
9494   MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
9495 
9496   // Cluster adjacent cases with the same destination. We do this at all
9497   // optimization levels because it's cheap to do and will make codegen faster
9498   // if there are many clusters.
9499   sortAndRangeify(Clusters);
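       // For example, case values 1, 2 and 3 that all branch to the same block
       // become a single CC_Range cluster covering [1, 3].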
9500 
9501   if (TM.getOptLevel() != CodeGenOpt::None) {
9502     // Replace an unreachable default with the most popular destination.
9503     // FIXME: Exploit unreachable default more aggressively.
9504     bool UnreachableDefault =
9505         isa<UnreachableInst>(SI.getDefaultDest()->getFirstNonPHIOrDbg());
9506     if (UnreachableDefault && !Clusters.empty()) {
9507       DenseMap<const BasicBlock *, unsigned> Popularity;
9508       unsigned MaxPop = 0;
9509       const BasicBlock *MaxBB = nullptr;
9510       for (auto I : SI.cases()) {
9511         const BasicBlock *BB = I.getCaseSuccessor();
9512         if (++Popularity[BB] > MaxPop) {
9513           MaxPop = Popularity[BB];
9514           MaxBB = BB;
9515         }
9516       }
9517       // Set new default.
9518       assert(MaxPop > 0 && MaxBB);
9519       DefaultMBB = FuncInfo.MBBMap[MaxBB];
9520 
9521       // Remove cases that were pointing to the destination that is now the
9522       // default.
9523       CaseClusterVector New;
9524       New.reserve(Clusters.size());
9525       for (CaseCluster &CC : Clusters) {
9526         if (CC.MBB != DefaultMBB)
9527           New.push_back(CC);
9528       }
9529       Clusters = std::move(New);
9530     }
9531   }
9532 
9533   // If there is only the default destination, jump there directly.
9534   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
9535   if (Clusters.empty()) {
9536     SwitchMBB->addSuccessor(DefaultMBB);
9537     if (DefaultMBB != NextBlock(SwitchMBB)) {
9538       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
9539                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
9540     }
9541     return;
9542   }
9543 
9544   findJumpTables(Clusters, &SI, DefaultMBB);
9545   findBitTestClusters(Clusters, &SI);
9546 
9547   DEBUG({
9548     dbgs() << "Case clusters: ";
9549     for (const CaseCluster &C : Clusters) {
9550       if (C.Kind == CC_JumpTable) dbgs() << "JT:";
9551       if (C.Kind == CC_BitTests) dbgs() << "BT:";
9552 
9553       C.Low->getValue().print(dbgs(), true);
9554       if (C.Low != C.High) {
9555         dbgs() << '-';
9556         C.High->getValue().print(dbgs(), true);
9557       }
9558       dbgs() << ' ';
9559     }
9560     dbgs() << '\n';
9561   });
9562 
9563   assert(!Clusters.empty());
9564   SwitchWorkList WorkList;
9565   CaseClusterIt First = Clusters.begin();
9566   CaseClusterIt Last = Clusters.end() - 1;
9567   auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
9568   WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
9569 
9570   while (!WorkList.empty()) {
9571     SwitchWorkListItem W = WorkList.back();
9572     WorkList.pop_back();
9573     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
9574 
9575     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
9576         !DefaultMBB->getParent()->getFunction()->optForMinSize()) {
9577       // For optimized builds, lower large range as a balanced binary tree.
9578       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
9579       continue;
9580     }
9581 
9582     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
9583   }
9584 }
9585