//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));
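
// A usage sketch (hypothetical invocation): passing -limit-float-precision=6
// to a tool that links this code, e.g. llc, asks for the cheapest (roughly
// 6-bit) inline approximations for the float libcalls that honor this option.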

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                 cl::desc("Minimum density for building a jump table in "
                          "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned>
OptsizeJumpTableDensity("optsize-jump-table-density", cl::init(40), cl::Hidden,
                        cl::desc("Minimum density for building a jump table in "
                                 "an optsize function"));
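
// Density here is a percentage: a switch with N cases spanning a range of R
// values has jump-table density 100*N/R. As a sketch of the defaults above,
// a switch covering 100 consecutive values needs at least 10 cases (40 when
// optimizing for size) before a jump table is considered.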


// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
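///
/// A minimal sketch of the scalar case: with NumParts = 2, PartVT = MVT::i32
/// and ValueVT = MVT::i64, the two i32 parts become Lo and Hi and are
/// recombined into one i64 with an ISD::BUILD_PAIR node (Lo and Hi are
/// swapped first on big-endian targets).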
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                const SDValue *Parts, unsigned NumParts,
                                MVT PartVT, EVT ValueVT, const Value *V,
                                Optional<ISD::NodeType> AssertOp = None) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
                                  PartVT, ValueVT, V);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL,
                              Parts + RoundParts, OddParts, PartVT, OddVT, V);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  llvm_unreachable("Unknown mismatch!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!V)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
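///
/// A minimal sketch: if ValueVT is v4f32 and the target's breakdown is two
/// registers of IntermediateVT v2f32, the two parts are reassembled with
/// ISD::CONCAT_VECTORS; scalar intermediate types would use ISD::BUILD_VECTOR
/// instead.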
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, ValueVT, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element types of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle cases such as i8 -> <1 x i1>
  if (ValueVT.getVectorNumElements() != 1) {
    diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                      "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
    Val = DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
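///
/// A minimal sketch of the scalar split: with ValueVT = MVT::i64, NumParts = 2
/// and PartVT = MVT::i32, the value is bitcast to an integer of the full width
/// and repeatedly bisected with ISD::EXTRACT_ELEMENT, yielding the low and
/// high i32 halves (reversed afterwards on big-endian targets).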
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                                 DAG.getIntPtrConstant(RoundBits, DL));
    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}


/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
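///
/// A minimal sketch: with ValueVT = v4f32, NumParts = 2 and an intermediate
/// type of v2f32, the vector is carved into two halves with
/// ISD::EXTRACT_SUBVECTOR (or into single lanes with ISD::EXTRACT_VECTOR_ELT
/// when the intermediate type is scalar), and each piece is then copied to
/// its part.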
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
               PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
      EVT ElementVT = PartVT.getVectorElementType();
      // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
      // undef elements.
      SmallVector<SDValue, 16> Ops;
      for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, ElementVT, Val,
            DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));

      for (unsigned i = ValueVT.getVectorNumElements(),
           e = PartVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getUNDEF(ElementVT));

      Val = DAG.getBuildVector(PartVT, DL, Ops);

      // FIXME: Use CONCAT for 2x -> 4x.

      //SDValue UndefElts = DAG.getUNDEF(VectorTy);
      //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                 ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      // Vector -> scalar conversion.
      assert(ValueVT.getVectorNumElements() == 1 &&
             "Only trivial vector-to-scalar conversions should get here!");
      Val = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));

      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    }

    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
                                                IntermediateVT,
                                                NumIntermediates, RegisterVT);
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector())
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getConstant(i * (NumElements / NumIntermediates), DL,
                                      TLI.getVectorIdxTy(DAG.getDataLayout())));
    else
      Ops[i] = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
          DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each intermediate value
    // into legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
  }
}

RegsForValue::RegsForValue() {}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs = TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT = TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    Reg += NumRegs;
  }
}

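/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies all of
/// this value's registers into a single merged value, tightening integer
/// copies with AssertSext/AssertZext where live-out register information
/// makes that safe.  If Flag is non-null, the copies are glued together and
/// *Flag is updated with the last copy's glue result.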
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
          !RegisterVT.isInteger() || RegisterVT.isVector())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.Zero.countLeadingOnes();

      if (NumZeroBits == RegSize) {
        // The current value is zero.  Express that explicitly, since that
        // makes it easier for later optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt = true;
      EVT FromVT(MVT::Other);
      if (NumSignBits == RegSize) {
        isSExt = true;   // ASSERT SEXT 1
        FromVT = MVT::i1;
      } else if (NumZeroBits >= RegSize - 1) {
        isSExt = false;  // ASSERT ZEXT 1
        FromVT = MVT::i1;
      } else if (NumSignBits > RegSize - 8) {
        isSExt = true;   // ASSERT SEXT 8
        FromVT = MVT::i8;
      } else if (NumZeroBits >= RegSize - 8) {
        isSExt = false;  // ASSERT ZEXT 8
        FromVT = MVT::i8;
      } else if (NumSignBits > RegSize - 16) {
        isSExt = true;   // ASSERT SEXT 16
        FromVT = MVT::i16;
      } else if (NumZeroBits >= RegSize - 16) {
        isSExt = false;  // ASSERT ZEXT 16
        FromVT = MVT::i16;
      } else if (NumSignBits > RegSize - 32) {
        isSExt = true;   // ASSERT SEXT 32
        FromVT = MVT::i32;
      } else if (NumZeroBits >= RegSize - 32) {
        isSExt = false;  // ASSERT ZEXT 32
        FromVT = MVT::i32;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
                                     NumRegs, RegisterVT, ValueVT, V);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

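/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the specified
/// value into the registers specified by this object.  If Flag is non-null,
/// the copies are glued together and *Flag is updated with the final glue
/// value; PreferredExtendType says how to extend the value when the parts
/// cover more bits than the value has.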
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
                   &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() &&
           TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));

      if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
        // If we clobbered the stack pointer, MFI should know about it.
        assert(DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment());
      }
    }
  }
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
                               const TargetLibraryInfo *li) {
  AA = &aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

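/// getRoot - Return the current virtual root of the Selection DAG, flushing
/// any PendingLoad items.  This must be done before emitting a store or any
/// other node that may need to be ordered after any prior load instructions.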
SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                             PendingLoads);
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

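/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items.  It is necessary
/// to do this before emitting a terminator instruction.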
SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                     PendingExports);
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (isa<TerminatorInst>(&I)) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (!isa<TerminatorInst>(&I) && !HasTailCall &&
      !isStatepoint(&I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
  if (DDI.getDI()) {
    const DbgValueInst *DI = DDI.getDI();
    DebugLoc dl = DDI.getdl();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    uint64_t Offset = DI->getOffset();
    SDDbgValue *SDV;
    if (Val.getNode()) {
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, Offset, false,
                                    Val)) {
        SDV = getDbgValue(Val, Variable, Expr, Offset, dl, DbgSDNodeOrder);
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      }
    } else
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    DanglingDebugInfoMap[V] = DanglingDebugInfo();
  }
}

/// getCopyFromRegs - If there was a virtual register allocated for the value
/// V, emit CopyFromReg of the specified type Ty. Return an empty SDValue()
/// otherwise.
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  SDValue Result;

  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;
    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                     DAG.getDataLayout(), InReg, Ty);
    SDValue Chain = DAG.getEntryNode();
    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
    resolveDanglingDebugInfo(V, Result);
  }

  return Result;
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
    return copyFromReg;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

// Return true if an SDValue exists for the given Value.
bool SelectionDAGBuilder::findValue(const Value *V) const {
  return (NodeMap.find(V) != NodeMap.end()) ||
    (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) {
    if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
      // Remove the debug location from the node as the node is about to be used
      // in a location which may differ from the original debug location.  This
      // is relevant to Constant and ConstantFP nodes because they can appear
      // as constant expressions inside PHI nodes.
      N->setDebugLoc(DebugLoc());
    }
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, getCurSDLoc(), VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, getCurSDLoc(),
                             TLI.getPointerTy(DAG.getDataLayout(), AS));
    }

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const ConstantDataSequential *CDS =
          dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Ops.push_back(SDValue(Val, i));
      }

      if (isa<ArrayType>(CDS->getType()))
        return DAG.getMergeValues(Ops, getCurSDLoc());
      return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
    }

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
        else
          Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
      return DAG.getBlockAddress(BA, VT);

    VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CV->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      EVT EltVT =
          TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
      else
        Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second,
                               TLI.getFrameIndexTy(DAG.getDataLayout()));
  }

  // If this is an instruction which fast-isel has deferred, select it now.
  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
    RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
                     Inst->getType());
    SDValue Chain = DAG.getEntryNode();
    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
  }

  llvm_unreachable("Can't get register for value!");
}

void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
  MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
  // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
  if (IsMSVCCXX || IsCoreCLR)
    CatchPadMBB->setIsEHFuncletEntry();

  DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other, getControlRoot()));
}

void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
  // Update machine-CFG edge.
  MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
  FuncInfo.MBB->addSuccessor(TargetMBB);

  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  if (IsSEH) {
    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (TargetMBB != NextBlock(FuncInfo.MBB) ||
        TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(TargetMBB)));
    return;
  }

  // Figure out the funclet membership for the catchret's successor.
  // This will be used by the FuncletLayout pass to determine how to order the
  // BB's.
  // A 'catchret' returns to the outer scope's color.
  Value *ParentPad = I.getCatchSwitchParentPad();
  const BasicBlock *SuccessorColor;
  if (isa<ConstantTokenNone>(ParentPad))
    SuccessorColor = &FuncInfo.Fn->getEntryBlock();
  else
    SuccessorColor = cast<Instruction>(ParentPad)->getParent();
  assert(SuccessorColor && "No parent funclet for catchret!");
  MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");

  // Create the terminator node.
  SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
                            getControlRoot(), DAG.getBasicBlock(TargetMBB),
                            DAG.getBasicBlock(SuccessorColorMBB));
  DAG.setRoot(Ret);
}

void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
  // Don't emit any special code for the cleanuppad instruction. It just marks
  // the start of a funclet.
  FuncInfo.MBB->setIsEHFuncletEntry();
  FuncInfo.MBB->setIsCleanupFuncletEntry();
}

/// When an invoke or a cleanupret unwinds to the next EH pad, there are
/// many places it could ultimately go. In the IR, we have a single unwind
/// destination, but in the machine CFG, we enumerate all the possible blocks.
/// This function skips over imaginary basic blocks that hold catchswitch
/// instructions, and finds all the "real" machine
/// basic block destinations. As those destinations may not be successors of
/// EHPadBB, here we also calculate the edge probability to those destinations.
/// The passed-in Prob is the edge probability to EHPadBB.
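///
/// A minimal sketch over hypothetical IR: if the pad block holds a
/// catchswitch with handlers %catch.a and %catch.b, both handler blocks are
/// recorded as machine destinations (and marked as funclet entries for
/// MSVC C++ and CoreCLR), and the walk continues through the catchswitch's
/// unwind destination, scaling Prob by each traversed edge's probability.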
static void findUnwindDestinations(
    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality =
    classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;

  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    BasicBlock *NewEHPadBB = nullptr;
    if (isa<LandingPadInst>(Pad)) {
      // Stop on landingpads. They are not funclets.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      break;
    } else if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads. Cleanups are always funclet entries for all known
      // personalities.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      UnwindDests.back().first->setIsEHFuncletEntry();
      break;
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
      }
      NewEHPadBB = CatchSwitch->getUnwindDest();
    } else {
      continue;
    }

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
    EHPadBB = NewEHPadBB;
  }
}

void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
  // Update successor info.
  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  auto UnwindDest = I.getUnwindDest();
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability UnwindDestProb =
      (BPI && UnwindDest)
          ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
  }
  FuncInfo.MBB->normalizeSuccProbs();

  // Create the terminator node.
  SDValue Ret =
      DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
  DAG.setRoot(Ret);
}

void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
  report_fatal_error("visitCatchSwitch not yet implemented!");
}

void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  SDValue Chain = getControlRoot();
  SmallVector<ISD::OutputArg, 8> Outs;
  SmallVector<SDValue, 8> OutVals;

  // Calls to @llvm.experimental.deoptimize don't generate a return value, so
  // lower
  //
  //   %val = call <ty> @llvm.experimental.deoptimize()
  //   ret <ty> %val
  //
  // differently.
  if (I.getParent()->getTerminatingDeoptimizeCall()) {
    LowerDeoptimizingReturn();
    return;
  }

  if (!FuncInfo.CanLowerReturn) {
    unsigned DemoteReg = FuncInfo.DemoteRegister;
    const Function *F = I.getParent()->getParent();

    // Emit a store of the return value through the virtual register.
    // Leave Outs empty so that LowerReturn won't try to load return
    // registers the usual way.
    SmallVector<EVT, 1> PtrValueVTs;
    ComputeValueVTs(TLI, DL, PointerType::getUnqual(F->getReturnType()),
                    PtrValueVTs);

    SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
                                        DemoteReg, PtrValueVTs[0]);
    SDValue RetOp = getValue(I.getOperand(0));

    SmallVector<EVT, 4> ValueVTs;
    SmallVector<uint64_t, 4> Offsets;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
    unsigned NumValues = ValueVTs.size();

    // An aggregate return value cannot wrap around the address space, so
    // offsets to its parts don't wrap either.
    SDNodeFlags Flags;
    Flags.setNoUnsignedWrap(true);

    SmallVector<SDValue, 4> Chains(NumValues);
    for (unsigned i = 0; i != NumValues; ++i) {
      SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(),
                                RetPtr.getValueType(), RetPtr,
                                DAG.getIntPtrConstant(Offsets[i],
                                                      getCurSDLoc()),
                                &Flags);
      Chains[i] = DAG.getStore(Chain, getCurSDLoc(),
                               SDValue(RetOp.getNode(), RetOp.getResNo() + i),
                               // FIXME: better loc info would be nice.
                               Add, MachinePointerInfo());
    }

    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
                        MVT::Other, Chains);
  } else if (I.getNumOperands() != 0) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues) {
      SDValue RetOp = getValue(I.getOperand(0));

      const Function *F = I.getParent()->getParent();

      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                          Attribute::SExt))
        ExtendKind = ISD::SIGN_EXTEND;
      else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                               Attribute::ZExt))
1376         ExtendKind = ISD::ZERO_EXTEND;
1377 
1378       LLVMContext &Context = F->getContext();
1379       bool RetInReg = F->getAttributes().hasAttribute(
1380           AttributeList::ReturnIndex, Attribute::InReg);
1381 
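      // Each value is split below into one or more parts of a legal register
      // type via getCopyToParts. For example (illustrative), an i64 return
      // value on a 32-bit target is lowered as two i32 parts.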
1382       for (unsigned j = 0; j != NumValues; ++j) {
1383         EVT VT = ValueVTs[j];
1384 
1385         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1386           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1387 
1388         unsigned NumParts = TLI.getNumRegisters(Context, VT);
1389         MVT PartVT = TLI.getRegisterType(Context, VT);
1390         SmallVector<SDValue, 4> Parts(NumParts);
1391         getCopyToParts(DAG, getCurSDLoc(),
1392                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1393                        &Parts[0], NumParts, PartVT, &I, ExtendKind);
1394 
1395         // 'inreg' on function refers to return value
1396         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1397         if (RetInReg)
1398           Flags.setInReg();
1399 
1400         // Propagate extension type if any
1401         if (ExtendKind == ISD::SIGN_EXTEND)
1402           Flags.setSExt();
1403         else if (ExtendKind == ISD::ZERO_EXTEND)
1404           Flags.setZExt();
1405 
1406         for (unsigned i = 0; i < NumParts; ++i) {
1407           Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1408                                         VT, /*isfixed=*/true, 0, 0));
1409           OutVals.push_back(Parts[i]);
1410         }
1411       }
1412     }
1413   }
1414 
1415   // Push the swifterror virtual register as the last element of Outs. This
1416   // makes sure the swifterror virtual register will be returned in the
1417   // swifterror physical register.
1418   const Function *F = I.getParent()->getParent();
1419   if (TLI.supportSwiftError() &&
1420       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1421     assert(FuncInfo.SwiftErrorArg && "Need a swift error argument");
1422     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1423     Flags.setSwiftError();
1424     Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1425                                   EVT(TLI.getPointerTy(DL)) /*argvt*/,
1426                                   true /*isfixed*/, 1 /*origidx*/,
1427                                   0 /*partOffs*/));
1428     // Create SDNode for the swifterror virtual register.
1429     OutVals.push_back(DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVReg(
1430                                           FuncInfo.MBB, FuncInfo.SwiftErrorArg),
1431                                       EVT(TLI.getPointerTy(DL))));
1432   }
1433 
1434   bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
1435   CallingConv::ID CallConv =
1436     DAG.getMachineFunction().getFunction()->getCallingConv();
1437   Chain = DAG.getTargetLoweringInfo().LowerReturn(
1438       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1439 
1440   // Verify that the target's LowerReturn behaved as expected.
1441   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1442          "LowerReturn didn't return a valid chain!");
1443 
1444   // Update the DAG with the new chain value resulting from return lowering.
1445   DAG.setRoot(Chain);
1446 }
1447 
1448 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1449 /// created for it, emit nodes to copy the value into the virtual
1450 /// registers.
1451 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1452   // Skip empty types
1453   if (V->getType()->isEmptyTy())
1454     return;
1455 
1456   DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1457   if (VMI != FuncInfo.ValueMap.end()) {
1458     assert(!V->use_empty() && "Unused value assigned virtual registers!");
1459     CopyValueToVirtualRegister(V, VMI->second);
1460   }
1461 }
1462 
1463 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1464 /// the current basic block, add it to ValueMap now so that we'll get a
1465 /// CopyTo/FromReg.
1466 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1467   // No need to export constants.
1468   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1469 
1470   // Already exported?
1471   if (FuncInfo.isExportedInst(V)) return;
1472 
1473   unsigned Reg = FuncInfo.InitializeRegForValue(V);
1474   CopyValueToVirtualRegister(V, Reg);
1475 }
1476 
1477 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1478                                                      const BasicBlock *FromBB) {
1479   // The operands of the setcc have to be in this block.  We don't know
1480   // how to export them from some other block.
1481   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1482     // Can export from current BB.
1483     if (VI->getParent() == FromBB)
1484       return true;
1485 
1486   // If it is already exported, this is a no-op.
1487     return FuncInfo.isExportedInst(V);
1488   }
1489 
1490   // If this is an argument, we can export it if the BB is the entry block or
1491   // if it is already exported.
1492   if (isa<Argument>(V)) {
1493     if (FromBB == &FromBB->getParent()->getEntryBlock())
1494       return true;
1495 
1496     // Otherwise, can only export this if it is already exported.
1497     return FuncInfo.isExportedInst(V);
1498   }
1499 
1500   // Otherwise, constants can always be exported.
1501   return true;
1502 }
1503 
1504 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
1505 BranchProbability
1506 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
1507                                         const MachineBasicBlock *Dst) const {
1508   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1509   const BasicBlock *SrcBB = Src->getBasicBlock();
1510   const BasicBlock *DstBB = Dst->getBasicBlock();
1511   if (!BPI) {
1512     // If BPI is not available, set the default probability as 1 / N, where N is
1513     // the number of successors.
1514     auto SuccSize = std::max<uint32_t>(
1515         std::distance(succ_begin(SrcBB), succ_end(SrcBB)), 1);
1516     return BranchProbability(1, SuccSize);
1517   }
1518   return BPI->getEdgeProbability(SrcBB, DstBB);
1519 }
1520 
1521 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
1522                                                MachineBasicBlock *Dst,
1523                                                BranchProbability Prob) {
1524   if (!FuncInfo.BPI)
1525     Src->addSuccessorWithoutProb(Dst);
1526   else {
1527     if (Prob.isUnknown())
1528       Prob = getEdgeProbability(Src, Dst);
1529     Src->addSuccessor(Dst, Prob);
1530   }
1531 }
1532 
1533 static bool InBlock(const Value *V, const BasicBlock *BB) {
1534   if (const Instruction *I = dyn_cast<Instruction>(V))
1535     return I->getParent() == BB;
1536   return true;
1537 }
1538 
1539 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1540 /// This function emits a branch and is used at the leaves of an OR or an
1541 /// AND operator tree.
1542 ///
1543 void
1544 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
1545                                                   MachineBasicBlock *TBB,
1546                                                   MachineBasicBlock *FBB,
1547                                                   MachineBasicBlock *CurBB,
1548                                                   MachineBasicBlock *SwitchBB,
1549                                                   BranchProbability TProb,
1550                                                   BranchProbability FProb,
1551                                                   bool InvertCond) {
1552   const BasicBlock *BB = CurBB->getBasicBlock();
1553 
1554   // If the leaf of the tree is a comparison, merge the condition into
1555   // the caseblock.
1556   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1557     // The operands of the cmp have to be in this block.  We don't know
1558     // how to export them from some other block.  If this is the first block
1559     // of the sequence, no exporting is needed.
1560     if (CurBB == SwitchBB ||
1561         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1562          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1563       ISD::CondCode Condition;
1564       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1565         ICmpInst::Predicate Pred =
1566             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
1567         Condition = getICmpCondCode(Pred);
1568       } else {
1569         const FCmpInst *FC = cast<FCmpInst>(Cond);
1570         FCmpInst::Predicate Pred =
1571             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
1572         Condition = getFCmpCondCode(Pred);
1573         if (TM.Options.NoNaNsFPMath)
1574           Condition = getFCmpCodeWithoutNaN(Condition);
1575       }
1576 
1577       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
1578                    TBB, FBB, CurBB, TProb, FProb);
1579       SwitchCases.push_back(CB);
1580       return;
1581     }
1582   }
1583 
1584   // Create a CaseBlock record representing this branch.
1585   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
1586   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
1587                nullptr, TBB, FBB, CurBB, TProb, FProb);
1588   SwitchCases.push_back(CB);
1589 }
1590 
1591 /// FindMergedConditions - If Cond is an expression like (A & B) or (A | B), recursively emit a tree of conditional branches for it instead of a single setcc.
1592 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
1593                                                MachineBasicBlock *TBB,
1594                                                MachineBasicBlock *FBB,
1595                                                MachineBasicBlock *CurBB,
1596                                                MachineBasicBlock *SwitchBB,
1597                                                Instruction::BinaryOps Opc,
1598                                                BranchProbability TProb,
1599                                                BranchProbability FProb,
1600                                                bool InvertCond) {
1601   // Look through a 'not' that is not itself part of the tree, and remember to
1602   // invert the opcode and operands at the next level.
1603   if (BinaryOperator::isNot(Cond) && Cond->hasOneUse()) {
1604     const Value *CondOp = BinaryOperator::getNotArgument(Cond);
1605     if (InBlock(CondOp, CurBB->getBasicBlock())) {
1606       FindMergedConditions(CondOp, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
1607                            !InvertCond);
1608       return;
1609     }
1610   }
1611 
1612   const Instruction *BOp = dyn_cast<Instruction>(Cond);
1613   // Compute the effective opcode for Cond, taking into account whether it needs
1614   // to be inverted, e.g.
1615   //   and (not (or A, B)), C
1616   // gets lowered as
1617   //   and (and (not A, not B), C)
1618   unsigned BOpc = 0;
1619   if (BOp) {
1620     BOpc = BOp->getOpcode();
1621     if (InvertCond) {
1622       if (BOpc == Instruction::And)
1623         BOpc = Instruction::Or;
1624       else if (BOpc == Instruction::Or)
1625         BOpc = Instruction::And;
1626     }
1627   }
1628 
1629   // If this node is not part of the or/and tree, emit it as a branch.
1630   if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1631       BOpc != Opc || !BOp->hasOneUse() ||
1632       BOp->getParent() != CurBB->getBasicBlock() ||
1633       !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1634       !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1635     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
1636                                  TProb, FProb, InvertCond);
1637     return;
1638   }
1639 
1640   //  Create TmpBB after CurBB.
1641   MachineFunction::iterator BBI(CurBB);
1642   MachineFunction &MF = DAG.getMachineFunction();
1643   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1644   CurBB->getParent()->insert(++BBI, TmpBB);
1645 
1646   if (Opc == Instruction::Or) {
1647     // Codegen X | Y as:
1648     // BB1:
1649     //   jmp_if_X TBB
1650     //   jmp TmpBB
1651     // TmpBB:
1652     //   jmp_if_Y TBB
1653     //   jmp FBB
1654     //
1655 
1656     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1657     // The requirement is that
1658     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
1659     //     = TrueProb for original BB.
1660     // Assuming the original probabilities are A and B, one choice is to set
1661     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
1662     // A/(1+B) and 2B/(1+B). This choice assumes that
1663     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
1664     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
1665     // TmpBB, but the math is more complicated.
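    // For example (illustrative numbers), with A = 3/4 and B = 1/4, BB1 gets
    // probabilities {3/8, 5/8} and TmpBB gets {3/5, 2/5}; then
    //   3/8 + 5/8 * 3/5 = 3/4 = A, as required.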
1666 
1667     auto NewTrueProb = TProb / 2;
1668     auto NewFalseProb = TProb / 2 + FProb;
1669     // Emit the LHS condition.
1670     FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
1671                          NewTrueProb, NewFalseProb, InvertCond);
1672 
1673     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
1674     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
1675     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1676     // Emit the RHS condition into TmpBB.
1677     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1678                          Probs[0], Probs[1], InvertCond);
1679   } else {
1680     assert(Opc == Instruction::And && "Unknown merge op!");
1681     // Codegen X & Y as:
1682     // BB1:
1683     //   jmp_if_X TmpBB
1684     //   jmp FBB
1685     // TmpBB:
1686     //   jmp_if_Y TBB
1687     //   jmp FBB
1688     //
1689     //  This requires creation of TmpBB after CurBB.
1690 
1691     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1692     // The requirement is that
1693     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
1694     //     = FalseProb for original BB.
1695     // Assuming the original probabilities are A and B, one choice is to set
1696     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
1697     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
1698     // TrueProb for BB1 * FalseProb for TmpBB.
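    // For example (illustrative numbers), with A = 3/4 and B = 1/4, BB1 gets
    // probabilities {7/8, 1/8} and TmpBB gets {6/7, 1/7}; then
    //   1/8 + 7/8 * 1/7 = 1/4 = B, as required.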
1699 
1700     auto NewTrueProb = TProb + FProb / 2;
1701     auto NewFalseProb = FProb / 2;
1702     // Emit the LHS condition.
1703     FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
1704                          NewTrueProb, NewFalseProb, InvertCond);
1705 
1706     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
1707     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
1708     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1709     // Emit the RHS condition into TmpBB.
1710     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1711                          Probs[0], Probs[1], InvertCond);
1712   }
1713 }
1714 
1715 /// If the set of cases should be emitted as a series of branches, return true.
1716 /// If we should emit this as a bunch of and/or'd together conditions, return
1717 /// false.
1718 bool
1719 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
1720   if (Cases.size() != 2) return true;
1721 
1722   // If this is two comparisons of the same values or'd or and'd together, they
1723   // will get folded into a single comparison, so don't emit two blocks.
1724   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1725        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1726       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1727        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1728     return false;
1729   }
1730 
1731   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
1732   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
1733   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
1734       Cases[0].CC == Cases[1].CC &&
1735       isa<Constant>(Cases[0].CmpRHS) &&
1736       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
1737     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
1738       return false;
1739     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
1740       return false;
1741   }
1742 
1743   return true;
1744 }
1745 
1746 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
1747   MachineBasicBlock *BrMBB = FuncInfo.MBB;
1748 
1749   // Update machine-CFG edges.
1750   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1751 
1752   if (I.isUnconditional()) {
1753     // Update machine-CFG edges.
1754     BrMBB->addSuccessor(Succ0MBB);
1755 
1756     // If this is not a fall-through branch or optimizations are switched off,
1757     // emit the branch.
1758     if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
1759       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
1760                               MVT::Other, getControlRoot(),
1761                               DAG.getBasicBlock(Succ0MBB)));
1762 
1763     return;
1764   }
1765 
1766   // If this condition is one of the special cases we handle, lower it
1767   // specially now.
1768   const Value *CondVal = I.getCondition();
1769   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1770 
1771   // If this is a series of conditions that are or'd or and'd together, emit
1772   // this as a sequence of branches instead of setcc's with and/or operations.
1773   // As long as jumps are not expensive, this should improve performance.
1774   // For example, instead of something like:
1775   //     cmp A, B
1776   //     C = seteq
1777   //     cmp D, E
1778   //     F = setle
1779   //     or C, F
1780   //     jnz foo
1781   // Emit:
1782   //     cmp A, B
1783   //     je foo
1784   //     cmp D, E
1785   //     jle foo
1786   //
1787   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1788     Instruction::BinaryOps Opcode = BOp->getOpcode();
1789     if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
1790         !I.getMetadata(LLVMContext::MD_unpredictable) &&
1791         (Opcode == Instruction::And || Opcode == Instruction::Or)) {
1792       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
1793                            Opcode,
1794                            getEdgeProbability(BrMBB, Succ0MBB),
1795                            getEdgeProbability(BrMBB, Succ1MBB),
1796                            /*InvertCond=*/false);
1797       // If the compares in later blocks need to use values not currently
1798       // exported from this block, export them now.  This block should always
1799       // be the first entry.
1800       assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
1801 
1802       // Allow some cases to be rejected.
1803       if (ShouldEmitAsBranches(SwitchCases)) {
1804         for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1805           ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1806           ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1807         }
1808 
1809         // Emit the branch for this block.
1810         visitSwitchCase(SwitchCases[0], BrMBB);
1811         SwitchCases.erase(SwitchCases.begin());
1812         return;
1813       }
1814 
1815       // Okay, we decided not to do this; remove any inserted MBBs and clear
1816       // SwitchCases.
1817       for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1818         FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1819 
1820       SwitchCases.clear();
1821     }
1822   }
1823 
1824   // Create a CaseBlock record representing this branch.
1825   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1826                nullptr, Succ0MBB, Succ1MBB, BrMBB);
1827 
1828   // Use visitSwitchCase to actually insert the fast branch sequence for this
1829   // cond branch.
1830   visitSwitchCase(CB, BrMBB);
1831 }
1832 
1833 /// visitSwitchCase - Emits the necessary code to represent a single node in
1834 /// the binary search tree resulting from lowering a switch instruction.
1835 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
1836                                           MachineBasicBlock *SwitchBB) {
1837   SDValue Cond;
1838   SDValue CondLHS = getValue(CB.CmpLHS);
1839   SDLoc dl = getCurSDLoc();
1840 
1841   // Build the setcc now.
1842   if (!CB.CmpMHS) {
1843     // Fold "(X == true)" to X and "(X == false)" to !X to
1844     // handle common cases produced by branch lowering.
1845     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1846         CB.CC == ISD::SETEQ)
1847       Cond = CondLHS;
1848     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1849              CB.CC == ISD::SETEQ) {
1850       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
1851       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1852     } else
1853       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1854   } else {
1855     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
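    // A range check Low <= X <= High is emitted as one unsigned comparison,
    // (X - Low) u<= (High - Low); when Low is the signed minimum value, the
    // subtract is skipped and a signed SETLE against High suffices.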
1856 
1857     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1858     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
1859 
1860     SDValue CmpOp = getValue(CB.CmpMHS);
1861     EVT VT = CmpOp.getValueType();
1862 
1863     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1864       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
1865                           ISD::SETLE);
1866     } else {
1867       SDValue SUB = DAG.getNode(ISD::SUB, dl,
1868                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
1869       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1870                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
1871     }
1872   }
1873 
1874   // Update successor info
1875   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
1876   // TrueBB and FalseBB are always different unless the incoming IR is
1877   // degenerate. This only happens when running llc on weird IR.
1878   if (CB.TrueBB != CB.FalseBB)
1879     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
1880   SwitchBB->normalizeSuccProbs();
1881 
1882   // If the lhs block is the next block, invert the condition so that we can
1883   // fall through to the lhs instead of the rhs block.
1884   if (CB.TrueBB == NextBlock(SwitchBB)) {
1885     std::swap(CB.TrueBB, CB.FalseBB);
1886     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
1887     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1888   }
1889 
1890   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1891                                MVT::Other, getControlRoot(), Cond,
1892                                DAG.getBasicBlock(CB.TrueBB));
1893 
1894   // Insert the false branch. Do this even if it's a fall-through branch;
1895   // this makes it easier to do DAG optimizations which require inverting
1896   // the branch condition.
1897   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1898                        DAG.getBasicBlock(CB.FalseBB));
1899 
1900   DAG.setRoot(BrCond);
1901 }
1902 
1903 /// visitJumpTable - Emit JumpTable node in the current MBB
1904 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1905   // Emit the code for the jump table
1906   assert(JT.Reg != -1U && "Should lower JT Header first!");
1907   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
1908   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
1909                                      JT.Reg, PTy);
1910   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1911   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
1912                                     MVT::Other, Index.getValue(1),
1913                                     Table, Index);
1914   DAG.setRoot(BrJumpTable);
1915 }
1916 
1917 /// visitJumpTableHeader - This function emits the code needed to produce an
1918 /// index into the jump table from the value of the switch case.
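/// For example (illustrative), for cases 10..20 the emitted header computes
/// Index = Value - 10 and branches to the default block when Index u> 10;
/// otherwise Index is copied to a register for use by the jump table block.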
1919 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1920                                                JumpTableHeader &JTH,
1921                                                MachineBasicBlock *SwitchBB) {
1922   SDLoc dl = getCurSDLoc();
1923 
1924   // Subtract the lowest switch case value from the value being switched on,
1925   // and conditionally branch to the default MBB if the result is greater than
1926   // the difference between the smallest and largest cases.
1927   SDValue SwitchOp = getValue(JTH.SValue);
1928   EVT VT = SwitchOp.getValueType();
1929   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
1930                             DAG.getConstant(JTH.First, dl, VT));
1931 
1932   // The SDNode we just created, which holds the value being switched on minus
1933   // the smallest case value, needs to be copied to a virtual register so it
1934   // can be used as an index into the jump table in a subsequent basic block.
1935   // This value may be smaller or larger than the target's pointer type, and
1936   // may therefore require extension or truncation.
1937   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1938   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
1939 
1940   unsigned JumpTableReg =
1941       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
1942   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
1943                                     JumpTableReg, SwitchOp);
1944   JT.Reg = JumpTableReg;
1945 
1946   // Emit the range check for the jump table, and branch to the default block
1947   // for the switch statement if the value being switched on exceeds the largest
1948   // case in the switch.
1949   SDValue CMP = DAG.getSetCC(
1950       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
1951                                  Sub.getValueType()),
1952       Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
1953 
1954   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1955                                MVT::Other, CopyTo, CMP,
1956                                DAG.getBasicBlock(JT.Default));
1957 
1958   // Avoid emitting unnecessary branches to the next block.
1959   if (JT.MBB != NextBlock(SwitchBB))
1960     BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1961                          DAG.getBasicBlock(JT.MBB));
1962 
1963   DAG.setRoot(BrCond);
1964 }
1965 
1966 /// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
1967 /// variable, if one exists.
1968 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
1969                                  SDValue &Chain) {
1970   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1971   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
1972   MachineFunction &MF = DAG.getMachineFunction();
1973   Value *Global = TLI.getSDagStackGuard(*MF.getFunction()->getParent());
1974   MachineSDNode *Node =
1975       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
1976   if (Global) {
1977     MachinePointerInfo MPInfo(Global);
1978     MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
1979     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1980                  MachineMemOperand::MODereferenceable;
1981     *MemRefs = MF.getMachineMemOperand(MPInfo, Flags, PtrTy.getSizeInBits() / 8,
1982                                        DAG.getEVTAlignment(PtrTy));
1983     Node->setMemRefs(MemRefs, MemRefs + 1);
1984   }
1985   return SDValue(Node, 0);
1986 }
1987 
1988 /// Codegen a new tail for a stack protector check in ParentMBB, which has had
1989 /// its original tail spliced into a stack protector check success BB.
1990 ///
1991 /// For a high level explanation of how this fits into the stack protector
1992 /// generation see the comment on the declaration of class
1993 /// StackProtectorDescriptor.
1994 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
1995                                                   MachineBasicBlock *ParentBB) {
1996 
1997   // First create the loads to the guard/stack slot for the comparison.
1998   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1999   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2000 
2001   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2002   int FI = MFI.getStackProtectorIndex();
2003 
2004   SDValue Guard;
2005   SDLoc dl = getCurSDLoc();
2006   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2007   const Module &M = *ParentBB->getParent()->getFunction()->getParent();
2008   unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
2009 
2010   // Generate code to load the content of the guard slot.
2011   SDValue StackSlot = DAG.getLoad(
2012       PtrTy, dl, DAG.getEntryNode(), StackSlotPtr,
2013       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2014       MachineMemOperand::MOVolatile);
2015 
2016   // Retrieve the guard check function, or nullptr if instrumentation is inlined.
2017   if (const Value *GuardCheck = TLI.getSSPStackGuardCheck(M)) {
2018     // The target provides a guard check function to validate the guard value.
2019     // Generate a call to that function with the content of the guard slot as
2020     // argument.
2021     auto *Fn = cast<Function>(GuardCheck);
2022     FunctionType *FnTy = Fn->getFunctionType();
2023     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2024 
2025     TargetLowering::ArgListTy Args;
2026     TargetLowering::ArgListEntry Entry;
2027     Entry.Node = StackSlot;
2028     Entry.Ty = FnTy->getParamType(0);
2029     if (Fn->hasAttribute(1, Attribute::AttrKind::InReg))
2030       Entry.IsInReg = true;
2031     Args.push_back(Entry);
2032 
2033     TargetLowering::CallLoweringInfo CLI(DAG);
2034     CLI.setDebugLoc(getCurSDLoc())
2035       .setChain(DAG.getEntryNode())
2036       .setCallee(Fn->getCallingConv(), FnTy->getReturnType(),
2037                  getValue(GuardCheck), std::move(Args));
2038 
2039     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2040     DAG.setRoot(Result.second);
2041     return;
2042   }
2043 
2044   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2045   // Otherwise, emit a volatile load to retrieve the stack guard value.
2046   SDValue Chain = DAG.getEntryNode();
2047   if (TLI.useLoadStackGuardNode()) {
2048     Guard = getLoadStackGuard(DAG, dl, Chain);
2049   } else {
2050     const Value *IRGuard = TLI.getSDagStackGuard(M);
2051     SDValue GuardPtr = getValue(IRGuard);
2052 
2053     Guard =
2054         DAG.getLoad(PtrTy, dl, Chain, GuardPtr, MachinePointerInfo(IRGuard, 0),
2055                     Align, MachineMemOperand::MOVolatile);
2056   }
2057 
2058   // Perform the comparison via a subtract/getsetcc.
2059   EVT VT = Guard.getValueType();
2060   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, StackSlot);
2061 
2062   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2063                                                         *DAG.getContext(),
2064                                                         Sub.getValueType()),
2065                              Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
2066 
2067   // If the sub is not 0, then we know the guard and stack slot are not equal,
2068   // so branch to the failure MBB.
2069   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2070                                MVT::Other, StackSlot.getOperand(0),
2071                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2072   // Otherwise branch to success MBB.
2073   SDValue Br = DAG.getNode(ISD::BR, dl,
2074                            MVT::Other, BrCond,
2075                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2076 
2077   DAG.setRoot(Br);
2078 }
2079 
2080 /// Codegen the failure basic block for a stack protector check.
2081 ///
2082 /// A failure stack protector machine basic block consists simply of a call to
2083 /// __stack_chk_fail().
2084 ///
2085 /// For a high level explanation of how this fits into the stack protector
2086 /// generation see the comment on the declaration of class
2087 /// StackProtectorDescriptor.
2088 void
2089 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2090   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2091   SDValue Chain =
2092       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2093                       None, false, getCurSDLoc(), false, false).second;
2094   DAG.setRoot(Chain);
2095 }
2096 
2097 /// visitBitTestHeader - This function emits the code needed to produce a value
2098 /// suitable for "bit tests".
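/// For example (illustrative, assuming the minimum case value is 0), cases
/// 0, 2 and 4 sharing a destination are encoded as the mask 0b10101, and the
/// test is a single AND of (1 << Value) against that mask.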
2099 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2100                                              MachineBasicBlock *SwitchBB) {
2101   SDLoc dl = getCurSDLoc();
2102 
2103   // Subtract the minimum value
2104   SDValue SwitchOp = getValue(B.SValue);
2105   EVT VT = SwitchOp.getValueType();
2106   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2107                             DAG.getConstant(B.First, dl, VT));
2108 
2109   // Check range
2110   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2111   SDValue RangeCmp = DAG.getSetCC(
2112       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2113                                  Sub.getValueType()),
2114       Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
2115 
2116   // Determine the type of the test operands.
2117   bool UsePtrType = false;
2118   if (!TLI.isTypeLegal(VT))
2119     UsePtrType = true;
2120   else {
2121     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2122       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2123         // Switch case ranges are encoded into a series of masks.
2124         // Just use the pointer type; it's guaranteed to fit.
2125         UsePtrType = true;
2126         break;
2127       }
2128   }
2129   if (UsePtrType) {
2130     VT = TLI.getPointerTy(DAG.getDataLayout());
2131     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2132   }
2133 
2134   B.RegVT = VT.getSimpleVT();
2135   B.Reg = FuncInfo.CreateReg(B.RegVT);
2136   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2137 
2138   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2139 
2140   addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2141   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2142   SwitchBB->normalizeSuccProbs();
2143 
2144   SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
2145                                 MVT::Other, CopyTo, RangeCmp,
2146                                 DAG.getBasicBlock(B.Default));
2147 
2148   // Avoid emitting unnecessary branches to the next block.
2149   if (MBB != NextBlock(SwitchBB))
2150     BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange,
2151                           DAG.getBasicBlock(MBB));
2152 
2153   DAG.setRoot(BrRange);
2154 }
2155 
2156 /// visitBitTestCase - This function produces one "bit test".
2157 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2158                                            MachineBasicBlock* NextMBB,
2159                                            BranchProbability BranchProbToNext,
2160                                            unsigned Reg,
2161                                            BitTestCase &B,
2162                                            MachineBasicBlock *SwitchBB) {
2163   SDLoc dl = getCurSDLoc();
2164   MVT VT = BB.RegVT;
2165   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2166   SDValue Cmp;
2167   unsigned PopCount = countPopulation(B.Mask);
2168   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2169   if (PopCount == 1) {
2170     // Testing for a single bit; just compare the shift count with what it
2171     // would need to be to shift a 1 bit in that position.
2172     Cmp = DAG.getSetCC(
2173         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2174         ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2175         ISD::SETEQ);
2176   } else if (PopCount == BB.Range) {
2177     // There is only one zero bit in the range, test for it directly.
2178     Cmp = DAG.getSetCC(
2179         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2180         ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2181         ISD::SETNE);
2182   } else {
2183     // Make desired shift
2184     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2185                                     DAG.getConstant(1, dl, VT), ShiftOp);
2186 
2187     // Emit bit tests and jumps
2188     SDValue AndOp = DAG.getNode(ISD::AND, dl,
2189                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2190     Cmp = DAG.getSetCC(
2191         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2192         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2193   }
2194 
2195   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2196   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2197   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2198   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2199   // The sum of B.ExtraProb and BranchProbToNext is not guaranteed to be one,
2200   // as they are relative probabilities (and thus work more like weights),
2201   // so we normalize them to make them sum to one.
2202   SwitchBB->normalizeSuccProbs();
2203 
2204   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2205                               MVT::Other, getControlRoot(),
2206                               Cmp, DAG.getBasicBlock(B.TargetBB));
2207 
2208   // Avoid emitting unnecessary branches to the next block.
2209   if (NextMBB != NextBlock(SwitchBB))
2210     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2211                         DAG.getBasicBlock(NextMBB));
2212 
2213   DAG.setRoot(BrAnd);
2214 }
2215 
2216 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2217   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2218 
2219   // Retrieve the successors. Look through artificial IR-level blocks like
2220   // catchswitch for the real successors.
2221   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2222   const BasicBlock *EHPadBB = I.getSuccessor(1);
2223 
2224   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2225   // have to do anything here to lower funclet bundles.
2226   assert(!I.hasOperandBundlesOtherThan(
2227              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2228          "Cannot lower invokes with arbitrary operand bundles yet!");
2229 
2230   const Value *Callee(I.getCalledValue());
2231   const Function *Fn = dyn_cast<Function>(Callee);
2232   if (isa<InlineAsm>(Callee))
2233     visitInlineAsm(&I);
2234   else if (Fn && Fn->isIntrinsic()) {
2235     switch (Fn->getIntrinsicID()) {
2236     default:
2237       llvm_unreachable("Cannot invoke this intrinsic");
2238     case Intrinsic::donothing:
2239       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2240       break;
2241     case Intrinsic::experimental_patchpoint_void:
2242     case Intrinsic::experimental_patchpoint_i64:
2243       visitPatchpoint(&I, EHPadBB);
2244       break;
2245     case Intrinsic::experimental_gc_statepoint:
2246       LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
2247       break;
2248     }
2249   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2250     // Currently we do not lower any intrinsic calls with deopt operand bundles.
2251     // Eventually we will support lowering the @llvm.experimental.deoptimize
2252     // intrinsic, and right now there are no plans to support other intrinsics
2253     // with deopt state.
2254     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2255   } else {
2256     LowerCallTo(&I, getValue(Callee), false, EHPadBB);
2257   }
2258 
2259   // If the value of the invoke is used outside of its defining block, make it
2260   // available as a virtual register.
2261   // We already took care of the exported value for the statepoint instruction
2262   // during the call to LowerStatepoint.
2263   if (!isStatepoint(I)) {
2264     CopyToExportRegsIfNeeded(&I);
2265   }
2266 
2267   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2268   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2269   BranchProbability EHPadBBProb =
2270       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2271           : BranchProbability::getZero();
2272   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2273 
2274   // Update successor info.
2275   addSuccessorWithProb(InvokeMBB, Return);
2276   for (auto &UnwindDest : UnwindDests) {
2277     UnwindDest.first->setIsEHPad();
2278     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2279   }
2280   InvokeMBB->normalizeSuccProbs();
2281 
2282   // Drop into normal successor.
2283   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2284                           MVT::Other, getControlRoot(),
2285                           DAG.getBasicBlock(Return)));
2286 }
2287 
2288 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2289   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2290 }
2291 
2292 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2293   assert(FuncInfo.MBB->isEHPad() &&
2294          "Call to landingpad not in landing pad!");
2295 
2296   MachineBasicBlock *MBB = FuncInfo.MBB;
2297   addLandingPadInfo(LP, *MBB);
2298 
2299   // If there aren't registers to copy the values into (e.g., during SjLj
2300   // exceptions), then don't bother to create these DAG nodes.
2301   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2302   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2303   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2304       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2305     return;
2306 
2307   // If the landingpad's return type is a token type, we don't create DAG nodes
2308   // for its exception pointer and selector value. Extracting the exception
2309   // pointer or selector value from a token-type landingpad is not currently
2310   // supported.
2311   if (LP.getType()->isTokenTy())
2312     return;
2313 
2314   SmallVector<EVT, 2> ValueVTs;
2315   SDLoc dl = getCurSDLoc();
2316   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2317   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2318 
2319   // Get the two live-in registers as SDValues. The physregs have already been
2320   // copied into virtual registers.
2321   SDValue Ops[2];
2322   if (FuncInfo.ExceptionPointerVirtReg) {
2323     Ops[0] = DAG.getZExtOrTrunc(
2324         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2325                            FuncInfo.ExceptionPointerVirtReg,
2326                            TLI.getPointerTy(DAG.getDataLayout())),
2327         dl, ValueVTs[0]);
2328   } else {
2329     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
2330   }
2331   Ops[1] = DAG.getZExtOrTrunc(
2332       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2333                          FuncInfo.ExceptionSelectorVirtReg,
2334                          TLI.getPointerTy(DAG.getDataLayout())),
2335       dl, ValueVTs[1]);
2336 
2337   // Merge into one.
2338   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2339                             DAG.getVTList(ValueVTs), Ops);
2340   setValue(&LP, Res);
2341 }
2342 
2343 void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
2344 #ifndef NDEBUG
2345   for (const CaseCluster &CC : Clusters)
2346     assert(CC.Low == CC.High && "Input clusters must be single-case");
2347 #endif
2348 
2349   std::sort(Clusters.begin(), Clusters.end(),
2350             [](const CaseCluster &a, const CaseCluster &b) {
2351     return a.Low->getValue().slt(b.Low->getValue());
2352   });
2353 
2354   // Merge adjacent clusters with the same destination.
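  // For example (illustrative), the single-case clusters 1->A, 2->A and 5->B
  // become the range clusters [1,2]->A and [5,5]->B.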
2355   const unsigned N = Clusters.size();
2356   unsigned DstIndex = 0;
2357   for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
2358     CaseCluster &CC = Clusters[SrcIndex];
2359     const ConstantInt *CaseVal = CC.Low;
2360     MachineBasicBlock *Succ = CC.MBB;
2361 
2362     if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
2363         (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
2364       // If this case has the same successor and is a neighbour, merge it into
2365       // the previous cluster.
2366       Clusters[DstIndex - 1].High = CaseVal;
2367       Clusters[DstIndex - 1].Prob += CC.Prob;
2368     } else {
2369       std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
2370                    sizeof(Clusters[SrcIndex]));
2371     }
2372   }
2373   Clusters.resize(DstIndex);
2374 }
2375 
2376 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2377                                            MachineBasicBlock *Last) {
2378   // Update JTCases.
2379   for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2380     if (JTCases[i].first.HeaderBB == First)
2381       JTCases[i].first.HeaderBB = Last;
2382 
2383   // Update BitTestCases.
2384   for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2385     if (BitTestCases[i].Parent == First)
2386       BitTestCases[i].Parent = Last;
2387 }
2388 
2389 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2390   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2391 
2392   // Update machine-CFG edges with unique successors.
2393   SmallSet<BasicBlock*, 32> Done;
2394   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2395     BasicBlock *BB = I.getSuccessor(i);
2396     bool Inserted = Done.insert(BB).second;
2397     if (!Inserted)
2398         continue;
2399       continue;
2400     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2401     addSuccessorWithProb(IndirectBrMBB, Succ);
2402   }
2403   IndirectBrMBB->normalizeSuccProbs();
2404 
2405   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2406                           MVT::Other, getControlRoot(),
2407                           getValue(I.getAddress())));
2408 }
2409 
2410 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2411   if (DAG.getTarget().Options.TrapUnreachable)
2412     DAG.setRoot(
2413         DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2414 }
2415 
2416 void SelectionDAGBuilder::visitFSub(const User &I) {
2417   // -0.0 - X --> fneg
2418   Type *Ty = I.getType();
2419   if (isa<Constant>(I.getOperand(0)) &&
2420       I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2421     SDValue Op2 = getValue(I.getOperand(1));
2422     setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2423                              Op2.getValueType(), Op2));
2424     return;
2425   }
2426 
2427   visitBinary(I, ISD::FSUB);
2428 }
2429 
2430 /// Checks if the given instruction performs a vector reduction, in which case
2431 /// we have the freedom to alter the elements in the result as long as the
2432 /// reduction of them stays unchanged.
2433 static bool isVectorReductionOp(const User *I) {
2434   const Instruction *Inst = dyn_cast<Instruction>(I);
2435   if (!Inst || !Inst->getType()->isVectorTy())
2436     return false;
2437 
2438   auto OpCode = Inst->getOpcode();
2439   switch (OpCode) {
2440   case Instruction::Add:
2441   case Instruction::Mul:
2442   case Instruction::And:
2443   case Instruction::Or:
2444   case Instruction::Xor:
2445     break;
2446   case Instruction::FAdd:
2447   case Instruction::FMul:
2448     if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2449       if (FPOp->getFastMathFlags().unsafeAlgebra())
2450         break;
2451     LLVM_FALLTHROUGH;
2452   default:
2453     return false;
2454   }
2455 
2456   unsigned ElemNum = Inst->getType()->getVectorNumElements();
2457   unsigned ElemNumToReduce = ElemNum;
2458 
2459   // Do DFS search on the def-use chain from the given instruction. We only
2460   // allow four kinds of operations during the search until we reach the
2461   // instruction that extracts the first element from the vector:
2462   //
2463   //   1. The reduction operation of the same opcode as the given instruction.
2464   //
2465   //   2. PHI node.
2466   //
2467   //   3. ShuffleVector instruction together with a reduction operation that
2468   //      does a partial reduction.
2469   //
2470   //   4. ExtractElement that extracts the first element from the vector, and we
2471   //      stop searching the def-use chain here.
2472   //
2473   // 3 & 4 above perform a reduction on all elements of the vector. We push defs
2474   // from 1-3 to the stack to continue the DFS. The given instruction is not
2475   // a reduction operation if we meet any other instructions other than those
2476   // listed above.
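  //
  // For example (illustrative IR), a log2 shuffle reduction that matches this
  // pattern looks like:
  //
  //   %rdx.shuf = shufflevector <4 x float> %v, <4 x float> undef,
  //                             <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  //   %bin.rdx = fadd fast <4 x float> %v, %rdx.shuf
  //   %rdx.shuf1 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
  //                              <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  //   %bin.rdx2 = fadd fast <4 x float> %bin.rdx, %rdx.shuf1
  //   %r = extractelement <4 x float> %bin.rdx2, i32 0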
2477 
2478   SmallVector<const User *, 16> UsersToVisit{Inst};
2479   SmallPtrSet<const User *, 16> Visited;
2480   bool ReduxExtracted = false;
2481 
2482   while (!UsersToVisit.empty()) {
2483     auto User = UsersToVisit.back();
2484     UsersToVisit.pop_back();
2485     if (!Visited.insert(User).second)
2486       continue;
2487 
2488     for (const auto &U : User->users()) {
2489       auto Inst = dyn_cast<Instruction>(U);
2490       if (!Inst)
2491         return false;
2492 
2493       if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
2494         if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2495           if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().unsafeAlgebra())
2496             return false;
2497         UsersToVisit.push_back(U);
2498       } else if (const ShuffleVectorInst *ShufInst =
2499                      dyn_cast<ShuffleVectorInst>(U)) {
2500         // Detect the following pattern: a ShuffleVector instruction together
2501         // with a reduction that does a partial reduction on the first and
2502         // second ElemNumToReduce / 2 elements, and stores the result in the
2503         // first ElemNumToReduce / 2 elements of another vector.
2504 
2505         unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
2506         if (ResultElements < ElemNum)
2507           return false;
2508 
2509         if (ElemNumToReduce == 1)
2510           return false;
2511         if (!isa<UndefValue>(U->getOperand(1)))
2512           return false;
2513         for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
2514           if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
2515             return false;
2516         for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
2517           if (ShufInst->getMaskValue(i) != -1)
2518             return false;
2519 
2520         // The ShuffleVector instruction must have exactly one user, which
2521         // must be the reduction operation.
2522         if (!U->hasOneUse())
2523           return false;
2524 
2525         auto U2 = dyn_cast<Instruction>(*U->user_begin());
2526         if (!U2 || U2->getOpcode() != OpCode)
2527           return false;
2528 
2529         // Check operands of the reduction operation.
2530         if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
2531             (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
2532           UsersToVisit.push_back(U2);
2533           ElemNumToReduce /= 2;
2534         } else
2535           return false;
2536       } else if (isa<ExtractElementInst>(U)) {
2537         // At this moment we should have reduced all elements in the vector.
2538         if (ElemNumToReduce != 1)
2539           return false;
2540 
2541         const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
2542         if (!Val || Val->getZExtValue() != 0)
2543           return false;
2544 
2545         ReduxExtracted = true;
2546       } else
2547         return false;
2548     }
2549   }
2550   return ReduxExtracted;
2551 }
2552 
2553 void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
2554   SDValue Op1 = getValue(I.getOperand(0));
2555   SDValue Op2 = getValue(I.getOperand(1));
2556 
2557   bool nuw = false;
2558   bool nsw = false;
2559   bool exact = false;
2560   bool vec_redux = false;
2561   FastMathFlags FMF;
2562 
2563   if (const OverflowingBinaryOperator *OFBinOp =
2564           dyn_cast<const OverflowingBinaryOperator>(&I)) {
2565     nuw = OFBinOp->hasNoUnsignedWrap();
2566     nsw = OFBinOp->hasNoSignedWrap();
2567   }
2568   if (const PossiblyExactOperator *ExactOp =
2569           dyn_cast<const PossiblyExactOperator>(&I))
2570     exact = ExactOp->isExact();
2571   if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&I))
2572     FMF = FPOp->getFastMathFlags();
2573 
2574   if (isVectorReductionOp(&I)) {
2575     vec_redux = true;
2576     DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");
2577   }
2578 
2579   SDNodeFlags Flags;
2580   Flags.setExact(exact);
2581   Flags.setNoSignedWrap(nsw);
2582   Flags.setNoUnsignedWrap(nuw);
2583   Flags.setVectorReduction(vec_redux);
2584   Flags.setAllowReciprocal(FMF.allowReciprocal());
2585   Flags.setAllowContract(FMF.allowContract());
2586   Flags.setNoInfs(FMF.noInfs());
2587   Flags.setNoNaNs(FMF.noNaNs());
2588   Flags.setNoSignedZeros(FMF.noSignedZeros());
2589   Flags.setUnsafeAlgebra(FMF.unsafeAlgebra());
2590 
2591   SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
2592                                      Op1, Op2, &Flags);
2593   setValue(&I, BinNodeValue);
2594 }
2595 
2596 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
2597   SDValue Op1 = getValue(I.getOperand(0));
2598   SDValue Op2 = getValue(I.getOperand(1));
2599 
2600   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
2601       Op2.getValueType(), DAG.getDataLayout());
2602 
2603   // Coerce the shift amount to the right type if we can.
2604   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
2605     unsigned ShiftSize = ShiftTy.getSizeInBits();
2606     unsigned Op2Size = Op2.getValueSizeInBits();
2607     SDLoc DL = getCurSDLoc();
2608 
2609     // If the operand is smaller than the shift count type, promote it.
2610     if (ShiftSize > Op2Size)
2611       Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
2612 
2613     // If the operand is larger than the shift count type but the shift
2614     // count type has enough bits to represent any shift value, truncate
2615     // it now. This is a common case and it exposes the truncate to
2616     // optimization early.
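    // For example, an i64 shift amount truncates safely to an i8 shift type:
    // in-range amounts are < 64 and need only Log2_32_Ceil(64) == 6 bits, and
    // larger amounts yield undefined results anyway.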
2617     else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
2618       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2619     // Otherwise we'll need to temporarily settle for some other convenient
2620     // type.  Type legalization will make adjustments once the shiftee is split.
2621     else
2622       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2623   }
2624 
2625   bool nuw = false;
2626   bool nsw = false;
2627   bool exact = false;
2628 
2629   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2630 
2631     if (const OverflowingBinaryOperator *OFBinOp =
2632             dyn_cast<const OverflowingBinaryOperator>(&I)) {
2633       nuw = OFBinOp->hasNoUnsignedWrap();
2634       nsw = OFBinOp->hasNoSignedWrap();
2635     }
2636     if (const PossiblyExactOperator *ExactOp =
2637             dyn_cast<const PossiblyExactOperator>(&I))
2638       exact = ExactOp->isExact();
2639   }
2640   SDNodeFlags Flags;
2641   Flags.setExact(exact);
2642   Flags.setNoSignedWrap(nsw);
2643   Flags.setNoUnsignedWrap(nuw);
2644   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2645                             &Flags);
2646   setValue(&I, Res);
2647 }
2648 
2649 void SelectionDAGBuilder::visitSDiv(const User &I) {
2650   SDValue Op1 = getValue(I.getOperand(0));
2651   SDValue Op2 = getValue(I.getOperand(1));
2652 
2653   SDNodeFlags Flags;
2654   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
2655                  cast<PossiblyExactOperator>(&I)->isExact());
2656   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
2657                            Op2, &Flags));
2658 }
2659 
2660 void SelectionDAGBuilder::visitICmp(const User &I) {
2661   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2662   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2663     predicate = IC->getPredicate();
2664   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2665     predicate = ICmpInst::Predicate(IC->getPredicate());
2666   SDValue Op1 = getValue(I.getOperand(0));
2667   SDValue Op2 = getValue(I.getOperand(1));
2668   ISD::CondCode Opcode = getICmpCondCode(predicate);
2669 
2670   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2671                                                         I.getType());
2672   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2673 }
2674 
2675 void SelectionDAGBuilder::visitFCmp(const User &I) {
2676   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2677   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2678     predicate = FC->getPredicate();
2679   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2680     predicate = FCmpInst::Predicate(FC->getPredicate());
2681   SDValue Op1 = getValue(I.getOperand(0));
2682   SDValue Op2 = getValue(I.getOperand(1));
2683   ISD::CondCode Condition = getFCmpCondCode(predicate);
2684 
2685   // FIXME: Fcmp instructions have fast-math-flags in IR, so we should use them.
2686   // FIXME: We should propagate the fast-math-flags to the DAG node itself for
2687   // further optimization, but currently FMF is only applicable to binary nodes.
2688   if (TM.Options.NoNaNsFPMath)
2689     Condition = getFCmpCodeWithoutNaN(Condition);
2690   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2691                                                         I.getType());
2692   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2693 }
2694 
2695 // Check if the condition of the select is used only by select instructions,
2696 // so forming a min/max will not keep the comparison alive elsewhere.
2697 static bool hasOnlySelectUsers(const Value *Cond) {
2698   return all_of(Cond->users(), [](const Value *V) {
2699     return isa<SelectInst>(V);
2700   });
2701 }
2702 
2703 void SelectionDAGBuilder::visitSelect(const User &I) {
2704   SmallVector<EVT, 4> ValueVTs;
2705   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
2706                   ValueVTs);
2707   unsigned NumValues = ValueVTs.size();
2708   if (NumValues == 0) return;
2709 
2710   SmallVector<SDValue, 4> Values(NumValues);
2711   SDValue Cond     = getValue(I.getOperand(0));
2712   SDValue LHSVal   = getValue(I.getOperand(1));
2713   SDValue RHSVal   = getValue(I.getOperand(2));
2714   auto BaseOps = {Cond};
2715   ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2716     ISD::VSELECT : ISD::SELECT;
2717 
2718   // Min/max matching is only viable if all output VTs are the same.
2719   if (std::equal(ValueVTs.begin() + 1, ValueVTs.end(), ValueVTs.begin())) {
2720     EVT VT = ValueVTs[0];
2721     LLVMContext &Ctx = *DAG.getContext();
2722     auto &TLI = DAG.getTargetLoweringInfo();
2723 
2724     // We care about the legality of the operation after it has been type
2725     // legalized.
2726     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal &&
2727            VT != TLI.getTypeToTransformTo(Ctx, VT))
2728       VT = TLI.getTypeToTransformTo(Ctx, VT);
2729 
2730     // If the vselect is legal, assume we want to leave this as a vector setcc +
2731     // vselect. Otherwise, if this is going to be scalarized, we want to see if
2732     // min/max is legal on the scalar type.
2733     bool UseScalarMinMax = VT.isVector() &&
2734       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
2735 
2736     Value *LHS, *RHS;
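    // matchSelectPattern recognizes min/max idioms such as (illustrative IR):
    //   %c = icmp sgt i32 %a, %b
    //   %s = select i1 %c, i32 %a, i32 %b   ; SPF_SMAX, LHS = %a, RHS = %b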
2737     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
2738     ISD::NodeType Opc = ISD::DELETED_NODE;
2739     switch (SPR.Flavor) {
2740     case SPF_UMAX:    Opc = ISD::UMAX; break;
2741     case SPF_UMIN:    Opc = ISD::UMIN; break;
2742     case SPF_SMAX:    Opc = ISD::SMAX; break;
2743     case SPF_SMIN:    Opc = ISD::SMIN; break;
2744     case SPF_FMINNUM:
2745       switch (SPR.NaNBehavior) {
2746       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2747       case SPNB_RETURNS_NAN:   Opc = ISD::FMINNAN; break;
2748       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
2749       case SPNB_RETURNS_ANY: {
2750         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
2751           Opc = ISD::FMINNUM;
2752         else if (TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT))
2753           Opc = ISD::FMINNAN;
2754         else if (UseScalarMinMax)
2755           Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
2756             ISD::FMINNUM : ISD::FMINNAN;
2757         break;
2758       }
2759       }
2760       break;
2761     case SPF_FMAXNUM:
2762       switch (SPR.NaNBehavior) {
2763       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2764       case SPNB_RETURNS_NAN:   Opc = ISD::FMAXNAN; break;
2765       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
2766       case SPNB_RETURNS_ANY:
2767 
2768         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
2769           Opc = ISD::FMAXNUM;
2770         else if (TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT))
2771           Opc = ISD::FMAXNAN;
2772         else if (UseScalarMinMax)
2773           Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
2774             ISD::FMAXNUM : ISD::FMAXNAN;
2775         break;
2776       }
2777       break;
2778     default: break;
2779     }
2780 
2781     if (Opc != ISD::DELETED_NODE &&
2782         (TLI.isOperationLegalOrCustom(Opc, VT) ||
2783          (UseScalarMinMax &&
2784           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
2785         // If the underlying comparison instruction is used by any other
2786         // instruction, the consumed instructions won't be destroyed, so it is
2787         // not profitable to convert to a min/max.
2788         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
2789       OpCode = Opc;
2790       LHSVal = getValue(LHS);
2791       RHSVal = getValue(RHS);
2792       BaseOps = {};
2793     }
2794   }
2795 
2796   for (unsigned i = 0; i != NumValues; ++i) {
2797     SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
2798     Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
2799     Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
2800     Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
2801                             LHSVal.getNode()->getValueType(LHSVal.getResNo()+i),
2802                             Ops);
2803   }
2804 
2805   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2806                            DAG.getVTList(ValueVTs), Values));
2807 }
2808 
2809 void SelectionDAGBuilder::visitTrunc(const User &I) {
2810   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2811   SDValue N = getValue(I.getOperand(0));
2812   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2813                                                         I.getType());
2814   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
2815 }
2816 
2817 void SelectionDAGBuilder::visitZExt(const User &I) {
2818   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2819   // ZExt also can't be a cast to bool for the same reason; nothing much to do.
2820   SDValue N = getValue(I.getOperand(0));
2821   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2822                                                         I.getType());
2823   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
2824 }
2825 
2826 void SelectionDAGBuilder::visitSExt(const User &I) {
2827   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2828   // SExt also can't be a cast to bool for the same reason; nothing much to do.
2829   SDValue N = getValue(I.getOperand(0));
2830   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2831                                                         I.getType());
2832   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
2833 }
2834 
2835 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
2836   // FPTrunc is never a no-op cast, no need to check
2837   SDValue N = getValue(I.getOperand(0));
2838   SDLoc dl = getCurSDLoc();
2839   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2840   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
2841   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
2842                            DAG.getTargetConstant(
2843                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
2844 }
2845 
2846 void SelectionDAGBuilder::visitFPExt(const User &I) {
2847   // FPExt is never a no-op cast, no need to check
2848   SDValue N = getValue(I.getOperand(0));
2849   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2850                                                         I.getType());
2851   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
2852 }
2853 
2854 void SelectionDAGBuilder::visitFPToUI(const User &I) {
2855   // FPToUI is never a no-op cast, no need to check
2856   SDValue N = getValue(I.getOperand(0));
2857   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2858                                                         I.getType());
2859   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
2860 }
2861 
2862 void SelectionDAGBuilder::visitFPToSI(const User &I) {
2863   // FPToSI is never a no-op cast, no need to check
2864   SDValue N = getValue(I.getOperand(0));
2865   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2866                                                         I.getType());
2867   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
2868 }
2869 
2870 void SelectionDAGBuilder::visitUIToFP(const User &I) {
2871   // UIToFP is never a no-op cast, no need to check
2872   SDValue N = getValue(I.getOperand(0));
2873   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2874                                                         I.getType());
2875   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
2876 }
2877 
2878 void SelectionDAGBuilder::visitSIToFP(const User &I) {
2879   // SIToFP is never a no-op cast, no need to check
2880   SDValue N = getValue(I.getOperand(0));
2881   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2882                                                         I.getType());
2883   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
2884 }
2885 
2886 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
2887   // What to do depends on the size of the integer and the size of the pointer.
2888   // We can either truncate, zero extend, or no-op, accordingly.
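  // e.g. ptrtoint of a 32-bit pointer to i64 zero extends, to i16 truncates,
  // and to i32 is a no-op.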
2889   SDValue N = getValue(I.getOperand(0));
2890   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2891                                                         I.getType());
2892   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
2893 }
2894 
2895 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
2896   // What to do depends on the size of the integer and the size of the pointer.
2897   // We can either truncate, zero extend, or no-op, accordingly.
2898   SDValue N = getValue(I.getOperand(0));
2899   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2900                                                         I.getType());
2901   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
2902 }
2903 
2904 void SelectionDAGBuilder::visitBitCast(const User &I) {
2905   SDValue N = getValue(I.getOperand(0));
2906   SDLoc dl = getCurSDLoc();
2907   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2908                                                         I.getType());
2909 
2910   // BitCast assures us that source and destination are the same size so this is
2911   // either a BITCAST or a no-op.
2912   if (DestVT != N.getValueType())
2913     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
2914                              DestVT, N)); // convert types.
2915   // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
2916   // might fold any kind of constant expression to an integer constant and that
2917   // is not what we are looking for. Only recognize a bitcast of a genuine
2918   // constant integer as an opaque constant.
2919   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
2920     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
2921                                  /*isOpaque*/true));
2922   else
2923     setValue(&I, N);            // noop cast.
2924 }
2925 
2926 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
2927   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2928   const Value *SV = I.getOperand(0);
2929   SDValue N = getValue(SV);
2930   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
2931 
2932   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
2933   unsigned DestAS = I.getType()->getPointerAddressSpace();
2934 
2935   if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
2936     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
2937 
2938   setValue(&I, N);
2939 }
2940 
2941 void SelectionDAGBuilder::visitInsertElement(const User &I) {
2942   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2943   SDValue InVec = getValue(I.getOperand(0));
2944   SDValue InVal = getValue(I.getOperand(1));
2945   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
2946                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
2947   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
2948                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
2949                            InVec, InVal, InIdx));
2950 }
2951 
2952 void SelectionDAGBuilder::visitExtractElement(const User &I) {
2953   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2954   SDValue InVec = getValue(I.getOperand(0));
2955   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
2956                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
2957   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
2958                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
2959                            InVec, InIdx));
2960 }
2961 
2962 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
2963   SDValue Src1 = getValue(I.getOperand(0));
2964   SDValue Src2 = getValue(I.getOperand(1));
2965   SDLoc DL = getCurSDLoc();
2966 
2967   SmallVector<int, 8> Mask;
2968   ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
2969   unsigned MaskNumElts = Mask.size();
2970 
2971   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2972   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
2973   EVT SrcVT = Src1.getValueType();
2974   unsigned SrcNumElts = SrcVT.getVectorNumElements();
2975 
2976   if (SrcNumElts == MaskNumElts) {
2977     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
2978     return;
2979   }
2980 
2981   // Normalize the shuffle vector since mask and vector length don't match.
2982   if (SrcNumElts < MaskNumElts) {
2983     // Mask is longer than the source vectors. We can use CONCAT_VECTORS to
2984     // make the source vectors and the mask the same length.
2985 
2986     if (MaskNumElts % SrcNumElts == 0) {
2987       // Mask length is a multiple of the source vector length.
2988       // Check if the shuffle is some kind of concatenation of the input
2989       // vectors.
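      // e.g. two <4 x i32> sources shuffled with mask <0,1,2,3,4,5,6,7>
      // simply concatenate Src1 and Src2.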
2990       unsigned NumConcat = MaskNumElts / SrcNumElts;
2991       bool IsConcat = true;
2992       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
2993       for (unsigned i = 0; i != MaskNumElts; ++i) {
2994         int Idx = Mask[i];
2995         if (Idx < 0)
2996           continue;
2997         // Ensure the indices in each SrcVT sized piece are sequential and that
2998         // the same source is used for the whole piece.
2999         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3000             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3001              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3002           IsConcat = false;
3003           break;
3004         }
3005         // Remember which source this index came from.
3006         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3007       }
3008 
3009       // The shuffle is concatenating multiple vectors together. Just emit
3010       // a CONCAT_VECTORS operation.
3011       if (IsConcat) {
3012         SmallVector<SDValue, 8> ConcatOps;
3013         for (auto Src : ConcatSrcs) {
3014           if (Src < 0)
3015             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3016           else if (Src == 0)
3017             ConcatOps.push_back(Src1);
3018           else
3019             ConcatOps.push_back(Src2);
3020         }
3021         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3022         return;
3023       }
3024     }
3025 
3026     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3027     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3028     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3029                                     PaddedMaskNumElts);
3030 
3031     // Pad both vectors with undefs to make them the same length as the mask.
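    // e.g. <4 x i32> sources with a 6-element mask are widened to <8 x i32>,
    // shuffled at that width, and the low 6 elements are extracted below.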
3032     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3033 
3034     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3035     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3036     MOps1[0] = Src1;
3037     MOps2[0] = Src2;
3038 
3039     Src1 = Src1.isUndef()
3040                ? DAG.getUNDEF(PaddedVT)
3041                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3042     Src2 = Src2.isUndef()
3043                ? DAG.getUNDEF(PaddedVT)
3044                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3045 
3046     // Readjust mask for new input vector length.
3047     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3048     for (unsigned i = 0; i != MaskNumElts; ++i) {
3049       int Idx = Mask[i];
3050       if (Idx >= (int)SrcNumElts)
3051         Idx -= SrcNumElts - PaddedMaskNumElts;
3052       MappedOps[i] = Idx;
3053     }
3054 
3055     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3056 
3057     // If the concatenated vector was padded, extract a subvector with the
3058     // correct number of elements.
3059     if (MaskNumElts != PaddedMaskNumElts)
3060       Result = DAG.getNode(
3061           ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3062           DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
3063 
3064     setValue(&I, Result);
3065     return;
3066   }
3067 
3068   if (SrcNumElts > MaskNumElts) {
3069     // Analyze the access pattern of the vector to see if we can extract
3070     // two subvectors and do the shuffle.
3071     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3072     bool CanExtract = true;
3073     for (int Idx : Mask) {
3074       unsigned Input = 0;
3075       if (Idx < 0)
3076         continue;
3077 
3078       if (Idx >= (int)SrcNumElts) {
3079         Input = 1;
3080         Idx -= SrcNumElts;
3081       }
3082 
3083       // If all the indices come from the same MaskNumElts sized portion of
3084       // the sources we can use extract. Also make sure the extract wouldn't
3085       // extract past the end of the source.
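      // e.g. with <8 x i32> sources and a 4-element mask using only indices
      // 4..7, a single <4 x i32> subvector extracted at index 4 suffices.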
3086       int NewStartIdx = alignDown(Idx, MaskNumElts);
3087       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3088           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3089         CanExtract = false;
3090       // Make sure we always update StartIdx as we use it to track if all
3091       // elements are undef.
3092       StartIdx[Input] = NewStartIdx;
3093     }
3094 
3095     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3096       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3097       return;
3098     }
3099     if (CanExtract) {
3100       // Extract appropriate subvector and generate a vector shuffle
3101       for (unsigned Input = 0; Input < 2; ++Input) {
3102         SDValue &Src = Input == 0 ? Src1 : Src2;
3103         if (StartIdx[Input] < 0)
3104           Src = DAG.getUNDEF(VT);
3105         else {
3106           Src = DAG.getNode(
3107               ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3108               DAG.getConstant(StartIdx[Input], DL,
3109                               TLI.getVectorIdxTy(DAG.getDataLayout())));
3110         }
3111       }
3112 
3113       // Calculate new mask.
3114       SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3115       for (int &Idx : MappedOps) {
3116         if (Idx >= (int)SrcNumElts)
3117           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3118         else if (Idx >= 0)
3119           Idx -= StartIdx[0];
3120       }
3121 
3122       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3123       return;
3124     }
3125   }
3126 
3127   // We can't use either concat vectors or extract subvectors, so fall back
3128   // to replacing the shuffle with per-element extracts and a build vector.
3130   EVT EltVT = VT.getVectorElementType();
3131   EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
3132   SmallVector<SDValue,8> Ops;
3133   for (int Idx : Mask) {
3134     SDValue Res;
3135 
3136     if (Idx < 0) {
3137       Res = DAG.getUNDEF(EltVT);
3138     } else {
3139       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3140       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3141 
3142       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
3143                         EltVT, Src, DAG.getConstant(Idx, DL, IdxVT));
3144     }
3145 
3146     Ops.push_back(Res);
3147   }
3148 
3149   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3150 }
3151 
3152 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
3153   const Value *Op0 = I.getOperand(0);
3154   const Value *Op1 = I.getOperand(1);
3155   Type *AggTy = I.getType();
3156   Type *ValTy = Op1->getType();
3157   bool IntoUndef = isa<UndefValue>(Op0);
3158   bool FromUndef = isa<UndefValue>(Op1);
3159 
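  // ComputeLinearIndex flattens the aggregate type into its scalar leaves;
  // e.g. in {i32, {i32, i32}} the index path {1, 0} maps to linear index 1.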
3160   unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
3161 
3162   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3163   SmallVector<EVT, 4> AggValueVTs;
3164   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3165   SmallVector<EVT, 4> ValValueVTs;
3166   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3167 
3168   unsigned NumAggValues = AggValueVTs.size();
3169   unsigned NumValValues = ValValueVTs.size();
3170   SmallVector<SDValue, 4> Values(NumAggValues);
3171 
3172   // Ignore an insertvalue that produces an empty object
3173   if (!NumAggValues) {
3174     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3175     return;
3176   }
3177 
3178   SDValue Agg = getValue(Op0);
3179   unsigned i = 0;
3180   // Copy the beginning value(s) from the original aggregate.
3181   for (; i != LinearIndex; ++i)
3182     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3183                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3184   // Copy values from the inserted value(s).
3185   if (NumValValues) {
3186     SDValue Val = getValue(Op1);
3187     for (; i != LinearIndex + NumValValues; ++i)
3188       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3189                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3190   }
3191   // Copy remaining value(s) from the original aggregate.
3192   for (; i != NumAggValues; ++i)
3193     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3194                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3195 
3196   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3197                            DAG.getVTList(AggValueVTs), Values));
3198 }
3199 
3200 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
3201   const Value *Op0 = I.getOperand(0);
3202   Type *AggTy = Op0->getType();
3203   Type *ValTy = I.getType();
3204   bool OutOfUndef = isa<UndefValue>(Op0);
3205 
3206   unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
3207 
3208   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3209   SmallVector<EVT, 4> ValValueVTs;
3210   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3211 
3212   unsigned NumValValues = ValValueVTs.size();
3213 
3214   // Ignore an extractvalue that produces an empty object
3215   if (!NumValValues) {
3216     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3217     return;
3218   }
3219 
3220   SmallVector<SDValue, 4> Values(NumValValues);
3221 
3222   SDValue Agg = getValue(Op0);
3223   // Copy out the selected value(s).
3224   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3225     Values[i - LinearIndex] =
3226       OutOfUndef ?
3227         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3228         SDValue(Agg.getNode(), Agg.getResNo() + i);
3229 
3230   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3231                            DAG.getVTList(ValValueVTs), Values));
3232 }
3233 
3234 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3235   Value *Op0 = I.getOperand(0);
3236   // Note that the pointer operand may be a vector of pointers. Take the scalar
3237   // element which holds a pointer.
3238   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3239   SDValue N = getValue(Op0);
3240   SDLoc dl = getCurSDLoc();
3241 
3242   // Normalize Vector GEP - all scalar operands should be converted to
3243   // splat vectors.
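  // e.g. a GEP with a scalar i32* base and a <4 x i32> index produces a
  // <4 x i32*> result, so the scalar base is splatted into a vector first.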
3244   unsigned VectorWidth = I.getType()->isVectorTy() ?
3245     cast<VectorType>(I.getType())->getVectorNumElements() : 0;
3246 
3247   if (VectorWidth && !N.getValueType().isVector()) {
3248     LLVMContext &Context = *DAG.getContext();
3249     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth);
3250     N = DAG.getSplatBuildVector(VT, dl, N);
3251   }
3252 
3253   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3254        GTI != E; ++GTI) {
3255     const Value *Idx = GTI.getOperand();
3256     if (StructType *StTy = GTI.getStructTypeOrNull()) {
3257       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3258       if (Field) {
3259         // N = N + Offset
3260         uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3261 
3262         // In an inbounds GEP with an offset that is nonnegative even when
3263         // interpreted as signed, assume there is no unsigned overflow.
3264         SDNodeFlags Flags;
3265         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3266           Flags.setNoUnsignedWrap(true);
3267 
3268         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3269                         DAG.getConstant(Offset, dl, N.getValueType()), &Flags);
3270       }
3271     } else {
3272       MVT PtrTy =
3273           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout(), AS);
3274       unsigned PtrSize = PtrTy.getSizeInBits();
3275       APInt ElementSize(PtrSize, DL->getTypeAllocSize(GTI.getIndexedType()));
3276 
3277       // If this is a scalar constant or a splat vector of constants,
3278       // handle it quickly.
3279       const auto *CI = dyn_cast<ConstantInt>(Idx);
3280       if (!CI && isa<ConstantDataVector>(Idx) &&
3281           cast<ConstantDataVector>(Idx)->getSplatValue())
3282         CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());
3283 
3284       if (CI) {
3285         if (CI->isZero())
3286           continue;
3287         APInt Offs = ElementSize * CI->getValue().sextOrTrunc(PtrSize);
3288         LLVMContext &Context = *DAG.getContext();
3289         SDValue OffsVal = VectorWidth ?
3290           DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, PtrTy, VectorWidth)) :
3291           DAG.getConstant(Offs, dl, PtrTy);
3292 
3293         // In an inbounds GEP with an offset that is nonnegative even when
3294         // interpreted as signed, assume there is no unsigned overflow.
3295         SDNodeFlags Flags;
3296         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3297           Flags.setNoUnsignedWrap(true);
3298 
3299         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, &Flags);
3300         continue;
3301       }
3302 
3303       // N = N + Idx * ElementSize;
3304       SDValue IdxN = getValue(Idx);
3305 
3306       if (!IdxN.getValueType().isVector() && VectorWidth) {
3307         MVT VT = MVT::getVectorVT(IdxN.getValueType().getSimpleVT(), VectorWidth);
3308         IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3309       }
3310 
3311       // If the index is smaller or larger than intptr_t, truncate or extend
3312       // it.
3313       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3314 
3315       // If this is a multiply by a power of two, turn it into a shl
3316       // immediately.  This is a very common case.
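      // e.g. indexing an array of i64 (ElementSize == 8) emits IdxN << 3
      // rather than IdxN * 8.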
3317       if (ElementSize != 1) {
3318         if (ElementSize.isPowerOf2()) {
3319           unsigned Amt = ElementSize.logBase2();
3320           IdxN = DAG.getNode(ISD::SHL, dl,
3321                              N.getValueType(), IdxN,
3322                              DAG.getConstant(Amt, dl, IdxN.getValueType()));
3323         } else {
3324           SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType());
3325           IdxN = DAG.getNode(ISD::MUL, dl,
3326                              N.getValueType(), IdxN, Scale);
3327         }
3328       }
3329 
3330       N = DAG.getNode(ISD::ADD, dl,
3331                       N.getValueType(), N, IdxN);
3332     }
3333   }
3334 
3335   setValue(&I, N);
3336 }
3337 
3338 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3339   // If this is a fixed sized alloca in the entry block of the function,
3340   // allocate it statically on the stack.
3341   if (FuncInfo.StaticAllocaMap.count(&I))
3342     return;   // getValue will auto-populate this.
3343 
3344   SDLoc dl = getCurSDLoc();
3345   Type *Ty = I.getAllocatedType();
3346   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3347   auto &DL = DAG.getDataLayout();
3348   uint64_t TySize = DL.getTypeAllocSize(Ty);
3349   unsigned Align =
3350       std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
3351 
3352   SDValue AllocSize = getValue(I.getArraySize());
3353 
3354   EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout());
3355   if (AllocSize.getValueType() != IntPtr)
3356     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
3357 
3358   AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
3359                           AllocSize,
3360                           DAG.getConstant(TySize, dl, IntPtr));
3361 
3362   // Handle alignment.  If the requested alignment is less than or equal to
3363   // the stack alignment, ignore it.  If the requested alignment is greater
3364   // than the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
3365   unsigned StackAlign =
3366       DAG.getSubtarget().getFrameLowering()->getStackAlignment();
3367   if (Align <= StackAlign)
3368     Align = 0;
3369 
3370   // Round the size of the allocation up to the stack alignment size
3371   // by adding SA-1 to the size. This doesn't overflow because we're computing
3372   // an address inside an alloca.
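  // e.g. with a 16-byte stack alignment, a 20-byte allocation becomes
  // (20 + 15) & ~15 == 32 bytes.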
3373   SDNodeFlags Flags;
3374   Flags.setNoUnsignedWrap(true);
3375   AllocSize = DAG.getNode(ISD::ADD, dl,
3376                           AllocSize.getValueType(), AllocSize,
3377                           DAG.getIntPtrConstant(StackAlign - 1, dl), &Flags);
3378 
3379   // Mask out the low bits for alignment purposes.
3380   AllocSize = DAG.getNode(ISD::AND, dl,
3381                           AllocSize.getValueType(), AllocSize,
3382                           DAG.getIntPtrConstant(~(uint64_t)(StackAlign - 1),
3383                                                 dl));
3384 
3385   SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align, dl) };
3386   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3387   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
3388   setValue(&I, DSA);
3389   DAG.setRoot(DSA.getValue(1));
3390 
3391   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3392 }
3393 
3394 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3395   if (I.isAtomic())
3396     return visitAtomicLoad(I);
3397 
3398   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3399   const Value *SV = I.getOperand(0);
3400   if (TLI.supportSwiftError()) {
3401     // Swifterror values can come from either a function parameter with
3402     // swifterror attribute or an alloca with swifterror attribute.
3403     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
3404       if (Arg->hasSwiftErrorAttr())
3405         return visitLoadFromSwiftError(I);
3406     }
3407 
3408     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3409       if (Alloca->isSwiftError())
3410         return visitLoadFromSwiftError(I);
3411     }
3412   }
3413 
3414   SDValue Ptr = getValue(SV);
3415 
3416   Type *Ty = I.getType();
3417 
3418   bool isVolatile = I.isVolatile();
3419   bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
3420   bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr;
3421   bool isDereferenceable = isDereferenceablePointer(SV, DAG.getDataLayout());
3422   unsigned Alignment = I.getAlignment();
3423 
3424   AAMDNodes AAInfo;
3425   I.getAAMetadata(AAInfo);
3426   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3427 
3428   SmallVector<EVT, 4> ValueVTs;
3429   SmallVector<uint64_t, 4> Offsets;
3430   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
3431   unsigned NumValues = ValueVTs.size();
3432   if (NumValues == 0)
3433     return;
3434 
3435   SDValue Root;
3436   bool ConstantMemory = false;
3437   if (isVolatile || NumValues > MaxParallelChains)
3438     // Serialize volatile loads with other side effects.
3439     Root = getRoot();
3440   else if (AA->pointsToConstantMemory(MemoryLocation(
3441                SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) {
3442     // Do not serialize (non-volatile) loads of constant memory with anything.
3443     Root = DAG.getEntryNode();
3444     ConstantMemory = true;
3445   } else {
3446     // Do not serialize non-volatile loads against each other.
3447     Root = DAG.getRoot();
3448   }
3449 
3450   SDLoc dl = getCurSDLoc();
3451 
3452   if (isVolatile)
3453     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
3454 
3455   // An aggregate load cannot wrap around the address space, so offsets to its
3456   // parts don't wrap either.
3457   SDNodeFlags Flags;
3458   Flags.setNoUnsignedWrap(true);
3459 
3460   SmallVector<SDValue, 4> Values(NumValues);
3461   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3462   EVT PtrVT = Ptr.getValueType();
3463   unsigned ChainI = 0;
3464   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3465     // Serializing loads here may result in excessive register pressure, and
3466     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
3467     // could recover a bit by hoisting nodes upward in the chain by recognizing
3468     // they are side-effect free or do not alias. The optimizer should really
3469     // avoid this case by converting large object/array copies to llvm.memcpy
3470     // (MaxParallelChains should always remain as failsafe).
3471     if (ChainI == MaxParallelChains) {
3472       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
3473       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3474                                   makeArrayRef(Chains.data(), ChainI));
3475       Root = Chain;
3476       ChainI = 0;
3477     }
3478     SDValue A = DAG.getNode(ISD::ADD, dl,
3479                             PtrVT, Ptr,
3480                             DAG.getConstant(Offsets[i], dl, PtrVT),
3481                             &Flags);
3482     auto MMOFlags = MachineMemOperand::MONone;
3483     if (isVolatile)
3484       MMOFlags |= MachineMemOperand::MOVolatile;
3485     if (isNonTemporal)
3486       MMOFlags |= MachineMemOperand::MONonTemporal;
3487     if (isInvariant)
3488       MMOFlags |= MachineMemOperand::MOInvariant;
3489     if (isDereferenceable)
3490       MMOFlags |= MachineMemOperand::MODereferenceable;
3491 
3492     SDValue L = DAG.getLoad(ValueVTs[i], dl, Root, A,
3493                             MachinePointerInfo(SV, Offsets[i]), Alignment,
3494                             MMOFlags, AAInfo, Ranges);
3495 
3496     Values[i] = L;
3497     Chains[ChainI] = L.getValue(1);
3498   }
3499 
3500   if (!ConstantMemory) {
3501     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3502                                 makeArrayRef(Chains.data(), ChainI));
3503     if (isVolatile)
3504       DAG.setRoot(Chain);
3505     else
3506       PendingLoads.push_back(Chain);
3507   }
3508 
3509   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
3510                            DAG.getVTList(ValueVTs), Values));
3511 }
3512 
3513 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
3514   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3515   assert(TLI.supportSwiftError() &&
3516          "call visitStoreToSwiftError when backend supports swifterror");
3517 
3518   SmallVector<EVT, 4> ValueVTs;
3519   SmallVector<uint64_t, 4> Offsets;
3520   const Value *SrcV = I.getOperand(0);
3521   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3522                   SrcV->getType(), ValueVTs, &Offsets);
3523   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3524          "expect a single EVT for swifterror");
3525 
3526   SDValue Src = getValue(SrcV);
3527   // Create a virtual register, then update the virtual register.
3528   auto &DL = DAG.getDataLayout();
3529   const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
3530   unsigned VReg = FuncInfo.MF->getRegInfo().createVirtualRegister(RC);
3531   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
3532   // Chain can be getRoot or getControlRoot.
3533   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
3534                                       SDValue(Src.getNode(), Src.getResNo()));
3535   DAG.setRoot(CopyNode);
3536   FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.getOperand(1), VReg);
3537 }
3538 
3539 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
3540   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
3541          "call visitLoadFromSwiftError when backend supports swifterror");
3542 
3543   assert(!I.isVolatile() &&
3544          I.getMetadata(LLVMContext::MD_nontemporal) == nullptr &&
3545          I.getMetadata(LLVMContext::MD_invariant_load) == nullptr &&
3546          "Support volatile, non temporal, invariant for load_from_swift_error");
3547 
3548   const Value *SV = I.getOperand(0);
3549   Type *Ty = I.getType();
3550   AAMDNodes AAInfo;
3551   I.getAAMetadata(AAInfo);
3552   assert(!AA->pointsToConstantMemory(MemoryLocation(
3553              SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo)) &&
3554          "load_from_swift_error should not be constant memory");
3555 
3556   SmallVector<EVT, 4> ValueVTs;
3557   SmallVector<uint64_t, 4> Offsets;
3558   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
3559                   ValueVTs, &Offsets);
3560   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3561          "expect a single EVT for swifterror");
3562 
3563   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
3564   SDValue L = DAG.getCopyFromReg(
3565       getRoot(), getCurSDLoc(),
3566       FuncInfo.getOrCreateSwiftErrorVReg(FuncInfo.MBB, SV), ValueVTs[0]);
3567 
3568   setValue(&I, L);
3569 }
3570 
3571 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
3572   if (I.isAtomic())
3573     return visitAtomicStore(I);
3574 
3575   const Value *SrcV = I.getOperand(0);
3576   const Value *PtrV = I.getOperand(1);
3577 
3578   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3579   if (TLI.supportSwiftError()) {
3580     // Swifterror values can come from either a function parameter with
3581     // swifterror attribute or an alloca with swifterror attribute.
3582     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
3583       if (Arg->hasSwiftErrorAttr())
3584         return visitStoreToSwiftError(I);
3585     }
3586 
3587     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
3588       if (Alloca->isSwiftError())
3589         return visitStoreToSwiftError(I);
3590     }
3591   }
3592 
3593   SmallVector<EVT, 4> ValueVTs;
3594   SmallVector<uint64_t, 4> Offsets;
3595   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3596                   SrcV->getType(), ValueVTs, &Offsets);
3597   unsigned NumValues = ValueVTs.size();
3598   if (NumValues == 0)
3599     return;
3600 
3601   // Get the lowered operands. Note that we do this after
3602   // checking if NumResults is zero, because with zero results
3603   // the operands won't have values in the map.
3604   SDValue Src = getValue(SrcV);
3605   SDValue Ptr = getValue(PtrV);
3606 
3607   SDValue Root = getRoot();
3608   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3609   SDLoc dl = getCurSDLoc();
3610   EVT PtrVT = Ptr.getValueType();
3611   unsigned Alignment = I.getAlignment();
3612   AAMDNodes AAInfo;
3613   I.getAAMetadata(AAInfo);
3614 
3615   auto MMOFlags = MachineMemOperand::MONone;
3616   if (I.isVolatile())
3617     MMOFlags |= MachineMemOperand::MOVolatile;
3618   if (I.getMetadata(LLVMContext::MD_nontemporal) != nullptr)
3619     MMOFlags |= MachineMemOperand::MONonTemporal;
3620 
3621   // An aggregate store cannot wrap around the address space, so offsets to
3622   // its parts don't wrap either.
3623   SDNodeFlags Flags;
3624   Flags.setNoUnsignedWrap(true);
3625 
3626   unsigned ChainI = 0;
3627   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3628     // See visitLoad comments.
3629     if (ChainI == MaxParallelChains) {
3630       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3631                                   makeArrayRef(Chains.data(), ChainI));
3632       Root = Chain;
3633       ChainI = 0;
3634     }
3635     SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3636                               DAG.getConstant(Offsets[i], dl, PtrVT), &Flags);
3637     SDValue St = DAG.getStore(
3638         Root, dl, SDValue(Src.getNode(), Src.getResNo() + i), Add,
3639         MachinePointerInfo(PtrV, Offsets[i]), Alignment, MMOFlags, AAInfo);
3640     Chains[ChainI] = St;
3641   }
3642 
3643   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3644                                   makeArrayRef(Chains.data(), ChainI));
3645   DAG.setRoot(StoreNode);
3646 }
3647 
3648 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
3649                                            bool IsCompressing) {
3650   SDLoc sdl = getCurSDLoc();
3651 
3652   auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3653                            unsigned& Alignment) {
3654     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
3655     Src0 = I.getArgOperand(0);
3656     Ptr = I.getArgOperand(1);
3657     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3658     Mask = I.getArgOperand(3);
3659   };
3660   auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3661                            unsigned& Alignment) {
3662     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
3663     Src0 = I.getArgOperand(0);
3664     Ptr = I.getArgOperand(1);
3665     Mask = I.getArgOperand(2);
3666     Alignment = 0;
3667   };
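  // A call lowered here looks like (illustrative IR for the masked form):
  //   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %val,
  //                                              <4 x i32>* %ptr, i32 4,
  //                                              <4 x i1> %mask)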
3668 
3669   Value  *PtrOperand, *MaskOperand, *Src0Operand;
3670   unsigned Alignment;
3671   if (IsCompressing)
3672     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3673   else
3674     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3675 
3676   SDValue Ptr = getValue(PtrOperand);
3677   SDValue Src0 = getValue(Src0Operand);
3678   SDValue Mask = getValue(MaskOperand);
3679 
3680   EVT VT = Src0.getValueType();
3681   if (!Alignment)
3682     Alignment = DAG.getEVTAlignment(VT);
3683 
3684   AAMDNodes AAInfo;
3685   I.getAAMetadata(AAInfo);
3686 
3687   MachineMemOperand *MMO =
3688     DAG.getMachineFunction().
3689     getMachineMemOperand(MachinePointerInfo(PtrOperand),
3690                           MachineMemOperand::MOStore,  VT.getStoreSize(),
3691                           Alignment, AAInfo);
3692   SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
3693                                          MMO, false /* Truncating */,
3694                                          IsCompressing);
3695   DAG.setRoot(StoreNode);
3696   setValue(&I, StoreNode);
3697 }
3698 
3699 // Get a uniform base for the Gather/Scatter intrinsic.
3700 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
3701 // We try to represent it as a base pointer + vector of indices.
3702 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
3703 // The first operand of the GEP may be a single pointer or a vector of pointers.
3704 // Example:
3705 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
3706 //  or
3707 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
3708 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
3709 //
3710 // When the first GEP operand is a single pointer, it is the uniform base we
3711 // are looking for. If the first operand of the GEP is a splat vector, we
3712 // extract the splat value and use it as the uniform base.
3713 // In all other cases the function returns 'false'.
3714 //
3715 static bool getUniformBase(const Value* &Ptr, SDValue& Base, SDValue& Index,
3716                            SelectionDAGBuilder* SDB) {
3717 
3718   SelectionDAG& DAG = SDB->DAG;
3719   LLVMContext &Context = *DAG.getContext();
3720 
3721   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
3722   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
3723   if (!GEP || GEP->getNumOperands() > 2)
3724     return false;
3725 
3726   const Value *GEPPtr = GEP->getPointerOperand();
3727   if (!GEPPtr->getType()->isVectorTy())
3728     Ptr = GEPPtr;
3729   else if (!(Ptr = getSplatValue(GEPPtr)))
3730     return false;
3731 
3732   Value *IndexVal = GEP->getOperand(1);
3733 
3734   // The operands of the GEP may be defined in another basic block.
3735   // In this case we'll not find nodes for the operands.
3736   if (!SDB->findValue(Ptr) || !SDB->findValue(IndexVal))
3737     return false;
3738 
3739   Base = SDB->getValue(Ptr);
3740   Index = SDB->getValue(IndexVal);
3741 
3742   // Suppress sign extension.
3743   if (SExtInst* Sext = dyn_cast<SExtInst>(IndexVal)) {
3744     if (SDB->findValue(Sext->getOperand(0))) {
3745       IndexVal = Sext->getOperand(0);
3746       Index = SDB->getValue(IndexVal);
3747     }
3748   }
3749   if (!Index.getValueType().isVector()) {
3750     unsigned GEPWidth = GEP->getType()->getVectorNumElements();
3751     EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
3752     Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
3753   }
3754   return true;
3755 }
3756 
3757 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
3758   SDLoc sdl = getCurSDLoc();
3759 
3760   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
3761   const Value *Ptr = I.getArgOperand(1);
3762   SDValue Src0 = getValue(I.getArgOperand(0));
3763   SDValue Mask = getValue(I.getArgOperand(3));
3764   EVT VT = Src0.getValueType();
3765   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
3766   if (!Alignment)
3767     Alignment = DAG.getEVTAlignment(VT);
3768   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3769 
3770   AAMDNodes AAInfo;
3771   I.getAAMetadata(AAInfo);
3772 
3773   SDValue Base;
3774   SDValue Index;
3775   const Value *BasePtr = Ptr;
3776   bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
3777 
3778   const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
3779   MachineMemOperand *MMO = DAG.getMachineFunction().
3780     getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
3781                          MachineMemOperand::MOStore,  VT.getStoreSize(),
3782                          Alignment, AAInfo);
3783   if (!UniformBase) {
3784     Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
3785     Index = getValue(Ptr);
3786   }
3787   SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index };
3788   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
3789                                          Ops, MMO);
3790   DAG.setRoot(Scatter);
3791   setValue(&I, Scatter);
3792 }
3793 
3794 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
3795   SDLoc sdl = getCurSDLoc();
3796 
3797   auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3798                            unsigned& Alignment) {
3799     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
3800     Ptr = I.getArgOperand(0);
3801     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
3802     Mask = I.getArgOperand(2);
3803     Src0 = I.getArgOperand(3);
3804   };
3805   auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3806                            unsigned& Alignment) {
3807     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
3808     Ptr = I.getArgOperand(0);
3809     Alignment = 0;
3810     Mask = I.getArgOperand(1);
3811     Src0 = I.getArgOperand(2);
3812   };
3813 
3814   Value  *PtrOperand, *MaskOperand, *Src0Operand;
3815   unsigned Alignment;
3816   if (IsExpanding)
3817     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3818   else
3819     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3820 
3821   SDValue Ptr = getValue(PtrOperand);
3822   SDValue Src0 = getValue(Src0Operand);
3823   SDValue Mask = getValue(MaskOperand);
3824 
3825   EVT VT = Src0.getValueType();
3826   if (!Alignment)
3827     Alignment = DAG.getEVTAlignment(VT);
3828 
3829   AAMDNodes AAInfo;
3830   I.getAAMetadata(AAInfo);
3831   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3832 
3833   // Do not serialize masked loads of constant memory with anything.
3834   bool AddToChain = !AA->pointsToConstantMemory(MemoryLocation(
3835       PtrOperand, DAG.getDataLayout().getTypeStoreSize(I.getType()), AAInfo));
3836   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
3837 
3838   MachineMemOperand *MMO =
3839     DAG.getMachineFunction().
3840     getMachineMemOperand(MachinePointerInfo(PtrOperand),
3841                           MachineMemOperand::MOLoad,  VT.getStoreSize(),
3842                           Alignment, AAInfo, Ranges);
3843 
3844   SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
3845                                    ISD::NON_EXTLOAD, IsExpanding);
3846   if (AddToChain) {
3847     SDValue OutChain = Load.getValue(1);
3848     DAG.setRoot(OutChain);
3849   }
3850   setValue(&I, Load);
3851 }
3852 
3853 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
3854   SDLoc sdl = getCurSDLoc();
3855 
3856   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
3857   const Value *Ptr = I.getArgOperand(0);
3858   SDValue Src0 = getValue(I.getArgOperand(3));
3859   SDValue Mask = getValue(I.getArgOperand(2));
3860 
3861   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3862   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3863   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
3864   if (!Alignment)
3865     Alignment = DAG.getEVTAlignment(VT);
3866 
3867   AAMDNodes AAInfo;
3868   I.getAAMetadata(AAInfo);
3869   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3870 
3871   SDValue Root = DAG.getRoot();
3872   SDValue Base;
3873   SDValue Index;
3874   const Value *BasePtr = Ptr;
3875   bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
3876   bool ConstantMemory = false;
3877   if (UniformBase &&
3878       AA->pointsToConstantMemory(MemoryLocation(
3879           BasePtr, DAG.getDataLayout().getTypeStoreSize(I.getType()),
3880           AAInfo))) {
3881     // Do not serialize (non-volatile) loads of constant memory with anything.
3882     Root = DAG.getEntryNode();
3883     ConstantMemory = true;
3884   }
3885 
3886   MachineMemOperand *MMO =
3887     DAG.getMachineFunction().
3888     getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
3889                          MachineMemOperand::MOLoad,  VT.getStoreSize(),
3890                          Alignment, AAInfo, Ranges);
3891 
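  // Note (describing the fallback below): without a uniform base, use a zero
  // base and treat the full vector of pointers as the gather index.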
3892   if (!UniformBase) {
3893     Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
3894     Index = getValue(Ptr);
3895   }
3896   SDValue Ops[] = { Root, Src0, Mask, Base, Index };
3897   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
3898                                        Ops, MMO);
3899 
3900   SDValue OutChain = Gather.getValue(1);
3901   if (!ConstantMemory)
3902     PendingLoads.push_back(OutChain);
3903   setValue(&I, Gather);
3904 }
3905 
3906 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
3907   SDLoc dl = getCurSDLoc();
3908   AtomicOrdering SuccessOrder = I.getSuccessOrdering();
3909   AtomicOrdering FailureOrder = I.getFailureOrdering();
3910   SynchronizationScope Scope = I.getSynchScope();
3911 
3912   SDValue InChain = getRoot();
3913 
3914   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
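  // The cmpxchg node produces three results: the loaded value, an i1 success
  // flag, and the output chain.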
3915   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
3916   SDValue L = DAG.getAtomicCmpSwap(
3917       ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
3918       getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
3919       getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
3920       /*Alignment=*/ 0, SuccessOrder, FailureOrder, Scope);
3921 
3922   SDValue OutChain = L.getValue(2);
3923 
3924   setValue(&I, L);
3925   DAG.setRoot(OutChain);
3926 }
3927 
3928 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
3929   SDLoc dl = getCurSDLoc();
3930   ISD::NodeType NT;
3931   switch (I.getOperation()) {
3932   default: llvm_unreachable("Unknown atomicrmw operation");
3933   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
3934   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
3935   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
3936   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
3937   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
3938   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
3939   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
3940   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
3941   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
3942   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
3943   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
3944   }
3945   AtomicOrdering Order = I.getOrdering();
3946   SynchronizationScope Scope = I.getSynchScope();
3947 
3948   SDValue InChain = getRoot();
3949 
3950   SDValue L =
3951     DAG.getAtomic(NT, dl,
3952                   getValue(I.getValOperand()).getSimpleValueType(),
3953                   InChain,
3954                   getValue(I.getPointerOperand()),
3955                   getValue(I.getValOperand()),
3956                   I.getPointerOperand(),
3957                   /*Alignment=*/ 0, Order, Scope);
3958 
3959   SDValue OutChain = L.getValue(1);
3960 
3961   setValue(&I, L);
3962   DAG.setRoot(OutChain);
3963 }
3964 
3965 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
3966   SDLoc dl = getCurSDLoc();
3967   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3968   SDValue Ops[3];
3969   Ops[0] = getRoot();
3970   Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
3971                            TLI.getFenceOperandTy(DAG.getDataLayout()));
3972   Ops[2] = DAG.getConstant(I.getSynchScope(), dl,
3973                            TLI.getFenceOperandTy(DAG.getDataLayout()));
3974   DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
3975 }
3976 
3977 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
3978   SDLoc dl = getCurSDLoc();
3979   AtomicOrdering Order = I.getOrdering();
3980   SynchronizationScope Scope = I.getSynchScope();
3981 
3982   SDValue InChain = getRoot();
3983 
3984   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3985   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3986 
3987   if (I.getAlignment() < VT.getSizeInBits() / 8)
3988     report_fatal_error("Cannot generate unaligned atomic load");
3989 
3990   MachineMemOperand *MMO =
3991       DAG.getMachineFunction().
3992       getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3993                            MachineMemOperand::MOVolatile |
3994                            MachineMemOperand::MOLoad,
3995                            VT.getStoreSize(),
3996                            I.getAlignment() ? I.getAlignment() :
3997                                               DAG.getEVTAlignment(VT),
3998                            AAMDNodes(), nullptr, Scope, Order);
3999 
4000   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4001   SDValue L =
4002       DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
4003                     getValue(I.getPointerOperand()), MMO);
4004 
4005   SDValue OutChain = L.getValue(1);
4006 
4007   setValue(&I, L);
4008   DAG.setRoot(OutChain);
4009 }
4010 
4011 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4012   SDLoc dl = getCurSDLoc();
4013 
4014   AtomicOrdering Order = I.getOrdering();
4015   SynchronizationScope Scope = I.getSynchScope();
4016 
4017   SDValue InChain = getRoot();
4018 
4019   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4020   EVT VT =
4021       TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4022 
4023   if (I.getAlignment() < VT.getSizeInBits() / 8)
4024     report_fatal_error("Cannot generate unaligned atomic store");
4025 
4026   SDValue OutChain =
4027     DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
4028                   InChain,
4029                   getValue(I.getPointerOperand()),
4030                   getValue(I.getValueOperand()),
4031                   I.getPointerOperand(), I.getAlignment(),
4032                   Order, Scope);
4033 
4034   DAG.setRoot(OutChain);
4035 }
4036 
4037 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4038 /// node.
4039 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4040                                                unsigned Intrinsic) {
4041   // Ignore the callsite's attributes. A specific call site may be marked with
4042   // readnone, but the lowering code will expect the chain based on the
4043   // definition.
4044   const Function *F = I.getCalledFunction();
4045   bool HasChain = !F->doesNotAccessMemory();
4046   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4047 
4048   // Build the operand list.
4049   SmallVector<SDValue, 8> Ops;
4050   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4051     if (OnlyLoad) {
4052       // We don't need to serialize loads against other loads.
4053       Ops.push_back(DAG.getRoot());
4054     } else {
4055       Ops.push_back(getRoot());
4056     }
4057   }
4058 
4059   // Info is set by getTgtMemIntrinsic.
4060   TargetLowering::IntrinsicInfo Info;
4061   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4062   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
4063 
4064   // Add the intrinsic ID as an integer operand if it's not a target intrinsic, or if its opcode is still a generic one (INTRINSIC_VOID/INTRINSIC_W_CHAIN).
4065   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4066       Info.opc == ISD::INTRINSIC_W_CHAIN)
4067     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4068                                         TLI.getPointerTy(DAG.getDataLayout())));
4069 
4070   // Add all operands of the call to the operand list.
4071   for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4072     SDValue Op = getValue(I.getArgOperand(i));
4073     Ops.push_back(Op);
4074   }
4075 
4076   SmallVector<EVT, 4> ValueVTs;
4077   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4078 
4079   if (HasChain)
4080     ValueVTs.push_back(MVT::Other);
4081 
4082   SDVTList VTs = DAG.getVTList(ValueVTs);
4083 
4084   // Create the node.
4085   SDValue Result;
4086   if (IsTgtIntrinsic) {
4087     // This is a target intrinsic that touches memory.
4088     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(),
4089                                      VTs, Ops, Info.memVT,
4090                                    MachinePointerInfo(Info.ptrVal, Info.offset),
4091                                      Info.align, Info.vol,
4092                                      Info.readMem, Info.writeMem, Info.size);
4093   } else if (!HasChain) {
4094     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4095   } else if (!I.getType()->isVoidTy()) {
4096     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4097   } else {
4098     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4099   }
4100 
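  // If present, the chain is always the intrinsic's last result value.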
4101   if (HasChain) {
4102     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4103     if (OnlyLoad)
4104       PendingLoads.push_back(Chain);
4105     else
4106       DAG.setRoot(Chain);
4107   }
4108 
4109   if (!I.getType()->isVoidTy()) {
4110     if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4111       EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4112       Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4113     } else
4114       Result = lowerRangeToAssertZExt(DAG, I, Result);
4115 
4116     setValue(&I, Result);
4117   }
4118 }
4119 
4120 /// GetSignificand - Get the significand and build it into a floating-point
4121 /// number with exponent of 1:
4122 ///
4123 ///   Op = (Op & 0x007fffff) | 0x3f800000;
4124 ///
4125 /// where Op is the i32 bit pattern of the floating-point value.
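/// For example (an illustrative value, not taken from this file): for
/// Op = bitcast(3.0f) = 0x40400000, (0x40400000 & 0x007fffff) | 0x3f800000
/// gives 0x3fc00000, which is 1.5f, i.e. the significand of 3.0f scaled
/// into [1,2).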
4126 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4127   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4128                            DAG.getConstant(0x007fffff, dl, MVT::i32));
4129   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4130                            DAG.getConstant(0x3f800000, dl, MVT::i32));
4131   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4132 }
4133 
4134 /// GetExponent - Get the exponent:
4135 ///
4136 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4137 ///
4138 /// where Op is the i32 bit pattern of the floating-point value.
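/// For example (an illustrative value, not taken from this file): for
/// Op = bitcast(3.0f) = 0x40400000, ((0x40400000 & 0x7f800000) >> 23) - 127
/// is 128 - 127 = 1, so the result is 1.0f.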
4139 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4140                            const TargetLowering &TLI, const SDLoc &dl) {
4141   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4142                            DAG.getConstant(0x7f800000, dl, MVT::i32));
4143   SDValue t1 = DAG.getNode(
4144       ISD::SRL, dl, MVT::i32, t0,
4145       DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4146   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4147                            DAG.getConstant(127, dl, MVT::i32));
4148   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4149 }
4150 
4151 /// getF32Constant - Get a 32-bit floating-point constant from its bit pattern.
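/// For example (illustrative): 0x3f800000 yields 1.0f, and 0x3f317218 yields
/// ~0.693147f (ln 2), matching the hex constants used in the expansions below.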
4152 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4153                               const SDLoc &dl) {
4154   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4155                            MVT::f32);
4156 }
4157 
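/// getLimitedPrecisionExp2 - Expand exp2(t0) in limited-precision mode by
/// splitting t0 into integer and fractional parts, using
/// 2^t0 = 2^IntegerPartOfX * 2^FractionalPartOfX: the integer part is shifted
/// into the f32 exponent field (bit 23) and added in the integer domain, and
/// 2^FractionalPartOfX is approximated by a polynomial chosen by
/// LimitFloatPrecision. For example (illustrative), exp2(3.5) = 2^3 * 2^0.5.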
4158 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4159                                        SelectionDAG &DAG) {
4160   // TODO: What fast-math-flags should be set on the floating-point nodes?
4161 
4162   //   IntegerPartOfX = (int32_t)t0;
4163   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4164 
4165   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
4166   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4167   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4168 
4169   //   IntegerPartOfX <<= 23;
4170   IntegerPartOfX = DAG.getNode(
4171       ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4172       DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4173                                   DAG.getDataLayout())));
4174 
4175   SDValue TwoToFractionalPartOfX;
4176   if (LimitFloatPrecision <= 6) {
4177     // For floating-point precision of 6:
4178     //
4179     //   TwoToFractionalPartOfX =
4180     //     0.997535578f +
4181     //       (0.735607626f + 0.252464424f * x) * x;
4182     //
4183     // error 0.0144103317, which is 6 bits
4184     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4185                              getF32Constant(DAG, 0x3e814304, dl));
4186     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4187                              getF32Constant(DAG, 0x3f3c50c8, dl));
4188     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4189     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4190                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
4191   } else if (LimitFloatPrecision <= 12) {
4192     // For floating-point precision of 12:
4193     //
4194     //   TwoToFractionalPartOfX =
4195     //     0.999892986f +
4196     //       (0.696457318f +
4197     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4198     //
4199     // error 0.000107046256, which is 13 to 14 bits
4200     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4201                              getF32Constant(DAG, 0x3da235e3, dl));
4202     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4203                              getF32Constant(DAG, 0x3e65b8f3, dl));
4204     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4205     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4206                              getF32Constant(DAG, 0x3f324b07, dl));
4207     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4208     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4209                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
4210   } else { // LimitFloatPrecision <= 18
4211     // For floating-point precision of 18:
4212     //
4213     //   TwoToFractionalPartOfX =
4214     //     0.999999982f +
4215     //       (0.693148872f +
4216     //         (0.240227044f +
4217     //           (0.554906021e-1f +
4218     //             (0.961591928e-2f +
4219     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4220     // error 2.47208000*10^(-7), which is better than 18 bits
4221     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4222                              getF32Constant(DAG, 0x3924b03e, dl));
4223     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4224                              getF32Constant(DAG, 0x3ab24b87, dl));
4225     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4226     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4227                              getF32Constant(DAG, 0x3c1d8c17, dl));
4228     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4229     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4230                              getF32Constant(DAG, 0x3d634a1d, dl));
4231     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4232     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4233                              getF32Constant(DAG, 0x3e75fe14, dl));
4234     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4235     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4236                               getF32Constant(DAG, 0x3f317234, dl));
4237     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4238     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4239                                          getF32Constant(DAG, 0x3f800000, dl));
4240   }
4241 
4242   // Add the exponent into the result in integer domain.
4243   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4244   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4245                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4246 }
4247 
4248 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
4249 /// limited-precision mode.
4250 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4251                          const TargetLowering &TLI) {
4252   if (Op.getValueType() == MVT::f32 &&
4253       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4254 
4255     // Put the exponent in the right bit position for later addition to the
4256     // final result:
4257     //
4258     //   #define LOG2OFe 1.4426950f
4259     //   t0 = Op * LOG2OFe
4260 
4261     // TODO: What fast-math-flags should be set here?
4262     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4263                              getF32Constant(DAG, 0x3fb8aa3b, dl));
4264     return getLimitedPrecisionExp2(t0, dl, DAG);
4265   }
4266 
4267   // No special expansion.
4268   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4269 }
4270 
4271 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4272 /// limited-precision mode.
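/// The limited-precision path decomposes x = 2^exponent * mantissa, with the
/// mantissa in [1,2), so log(x) = exponent * log(2) + log(mantissa), and
/// log(mantissa) is approximated by a polynomial over [1,2].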
4273 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4274                          const TargetLowering &TLI) {
4275 
4276   // TODO: What fast-math-flags should be set on the floating-point nodes?
4277 
4278   if (Op.getValueType() == MVT::f32 &&
4279       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4280     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4281 
4282     // Scale the exponent by log(2) [0.69314718f].
4283     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4284     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4285                                         getF32Constant(DAG, 0x3f317218, dl));
4286 
4287     // Get the significand and build it into a floating-point number with
4288     // exponent of 1.
4289     SDValue X = GetSignificand(DAG, Op1, dl);
4290 
4291     SDValue LogOfMantissa;
4292     if (LimitFloatPrecision <= 6) {
4293       // For floating-point precision of 6:
4294       //
4295       //   LogOfMantissa =
4296       //     -1.1609546f +
4297       //       (1.4034025f - 0.23903021f * x) * x;
4298       //
4299       // error 0.0034276066, which is better than 8 bits
4300       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4301                                getF32Constant(DAG, 0xbe74c456, dl));
4302       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4303                                getF32Constant(DAG, 0x3fb3a2b1, dl));
4304       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4305       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4306                                   getF32Constant(DAG, 0x3f949a29, dl));
4307     } else if (LimitFloatPrecision <= 12) {
4308       // For floating-point precision of 12:
4309       //
4310       //   LogOfMantissa =
4311       //     -1.7417939f +
4312       //       (2.8212026f +
4313       //         (-1.4699568f +
4314       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4315       //
4316       // error 0.000061011436, which is 14 bits
4317       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4318                                getF32Constant(DAG, 0xbd67b6d6, dl));
4319       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4320                                getF32Constant(DAG, 0x3ee4f4b8, dl));
4321       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4322       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4323                                getF32Constant(DAG, 0x3fbc278b, dl));
4324       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4325       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4326                                getF32Constant(DAG, 0x40348e95, dl));
4327       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4328       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4329                                   getF32Constant(DAG, 0x3fdef31a, dl));
4330     } else { // LimitFloatPrecision <= 18
4331       // For floating-point precision of 18:
4332       //
4333       //   LogOfMantissa =
4334       //     -2.1072184f +
4335       //       (4.2372794f +
4336       //         (-3.7029485f +
4337       //           (2.2781945f +
4338       //             (-0.87823314f +
4339       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4340       //
4341       // error 0.0000023660568, which is better than 18 bits
4342       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4343                                getF32Constant(DAG, 0xbc91e5ac, dl));
4344       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4345                                getF32Constant(DAG, 0x3e4350aa, dl));
4346       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4347       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4348                                getF32Constant(DAG, 0x3f60d3e3, dl));
4349       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4350       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4351                                getF32Constant(DAG, 0x4011cdf0, dl));
4352       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4353       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4354                                getF32Constant(DAG, 0x406cfd1c, dl));
4355       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4356       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4357                                getF32Constant(DAG, 0x408797cb, dl));
4358       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4359       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4360                                   getF32Constant(DAG, 0x4006dcab, dl));
4361     }
4362 
4363     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4364   }
4365 
4366   // No special expansion.
4367   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4368 }
4369 
4370 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4371 /// limited-precision mode.
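/// Here log2(x) = exponent + log2(mantissa), so unlike expandLog the extracted
/// exponent needs no scaling before the mantissa term is added.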
4372 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4373                           const TargetLowering &TLI) {
4374 
4375   // TODO: What fast-math-flags should be set on the floating-point nodes?
4376 
4377   if (Op.getValueType() == MVT::f32 &&
4378       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4379     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4380 
4381     // Get the exponent.
4382     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
4383 
4384     // Get the significand and build it into a floating-point number with
4385     // exponent of 1.
4386     SDValue X = GetSignificand(DAG, Op1, dl);
4387 
4388     // Different possible minimax approximations of significand in
4389     // floating-point for various degrees of accuracy over [1,2].
4390     SDValue Log2ofMantissa;
4391     if (LimitFloatPrecision <= 6) {
4392       // For floating-point precision of 6:
4393       //
4394       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
4395       //
4396       // error 0.0049451742, which is more than 7 bits
4397       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4398                                getF32Constant(DAG, 0xbeb08fe0, dl));
4399       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4400                                getF32Constant(DAG, 0x40019463, dl));
4401       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4402       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4403                                    getF32Constant(DAG, 0x3fd6633d, dl));
4404     } else if (LimitFloatPrecision <= 12) {
4405       // For floating-point precision of 12:
4406       //
4407       //   Log2ofMantissa =
4408       //     -2.51285454f +
4409       //       (4.07009056f +
4410       //         (-2.12067489f +
4411       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
4412       //
4413       // error 0.0000876136000, which is better than 13 bits
4414       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4415                                getF32Constant(DAG, 0xbda7262e, dl));
4416       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4417                                getF32Constant(DAG, 0x3f25280b, dl));
4418       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4419       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4420                                getF32Constant(DAG, 0x4007b923, dl));
4421       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4422       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4423                                getF32Constant(DAG, 0x40823e2f, dl));
4424       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4425       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4426                                    getF32Constant(DAG, 0x4020d29c, dl));
4427     } else { // LimitFloatPrecision <= 18
4428       // For floating-point precision of 18:
4429       //
4430       //   Log2ofMantissa =
4431       //     -3.0400495f +
4432       //       (6.1129976f +
4433       //         (-5.3420409f +
4434       //           (3.2865683f +
4435       //             (-1.2669343f +
4436       //               (0.27515199f -
4437       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
4438       //
4439       // error 0.0000018516, which is better than 18 bits
4440       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4441                                getF32Constant(DAG, 0xbcd2769e, dl));
4442       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4443                                getF32Constant(DAG, 0x3e8ce0b9, dl));
4444       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4445       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4446                                getF32Constant(DAG, 0x3fa22ae7, dl));
4447       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4448       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4449                                getF32Constant(DAG, 0x40525723, dl));
4450       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4451       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4452                                getF32Constant(DAG, 0x40aaf200, dl));
4453       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4454       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4455                                getF32Constant(DAG, 0x40c39dad, dl));
4456       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4457       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4458                                    getF32Constant(DAG, 0x4042902c, dl));
4459     }
4460 
4461     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
4462   }
4463 
4464   // No special expansion.
4465   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
4466 }
4467 
4468 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
4469 /// limited-precision mode.
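/// Here log10(x) = exponent * log10(2) + log10(mantissa); log10(2) appears
/// below as the hex constant 0x3e9a209a (~0.30103f).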
4470 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4471                            const TargetLowering &TLI) {
4472 
4473   // TODO: What fast-math-flags should be set on the floating-point nodes?
4474 
4475   if (Op.getValueType() == MVT::f32 &&
4476       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4477     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4478 
4479     // Scale the exponent by log10(2) [0.30102999f].
4480     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4481     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4482                                         getF32Constant(DAG, 0x3e9a209a, dl));
4483 
4484     // Get the significand and build it into a floating-point number with
4485     // exponent of 1.
4486     SDValue X = GetSignificand(DAG, Op1, dl);
4487 
4488     SDValue Log10ofMantissa;
4489     if (LimitFloatPrecision <= 6) {
4490       // For floating-point precision of 6:
4491       //
4492       //   Log10ofMantissa =
4493       //     -0.50419619f +
4494       //       (0.60948995f - 0.10380950f * x) * x;
4495       //
4496       // error 0.0014886165, which is 6 bits
4497       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4498                                getF32Constant(DAG, 0xbdd49a13, dl));
4499       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4500                                getF32Constant(DAG, 0x3f1c0789, dl));
4501       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4502       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4503                                     getF32Constant(DAG, 0x3f011300, dl));
4504     } else if (LimitFloatPrecision <= 12) {
4505       // For floating-point precision of 12:
4506       //
4507       //   Log10ofMantissa =
4508       //     -0.64831180f +
4509       //       (0.91751397f +
4510       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
4511       //
4512       // error 0.00019228036, which is better than 12 bits
4513       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4514                                getF32Constant(DAG, 0x3d431f31, dl));
4515       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4516                                getF32Constant(DAG, 0x3ea21fb2, dl));
4517       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4518       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4519                                getF32Constant(DAG, 0x3f6ae232, dl));
4520       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4521       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4522                                     getF32Constant(DAG, 0x3f25f7c3, dl));
4523     } else { // LimitFloatPrecision <= 18
4524       // For floating-point precision of 18:
4525       //
4526       //   Log10ofMantissa =
4527       //     -0.84299375f +
4528       //       (1.5327582f +
4529       //         (-1.0688956f +
4530       //           (0.49102474f +
4531       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
4532       //
4533       // error 0.0000037995730, which is better than 18 bits
4534       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4535                                getF32Constant(DAG, 0x3c5d51ce, dl));
4536       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4537                                getF32Constant(DAG, 0x3e00685a, dl));
4538       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4539       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4540                                getF32Constant(DAG, 0x3efb6798, dl));
4541       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4542       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4543                                getF32Constant(DAG, 0x3f88d192, dl));
4544       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4545       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4546                                getF32Constant(DAG, 0x3fc4316c, dl));
4547       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4548       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
4549                                     getF32Constant(DAG, 0x3f57ce70, dl));
4550     }
4551 
4552     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
4553   }
4554 
4555   // No special expansion.
4556   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
4557 }
4558 
4559 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
4560 /// limited-precision mode.
4561 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4562                           const TargetLowering &TLI) {
4563   if (Op.getValueType() == MVT::f32 &&
4564       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
4565     return getLimitedPrecisionExp2(Op, dl, DAG);
4566 
4567   // No special expansion.
4568   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
4569 }
4570 
4571 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
4572 /// limited-precision mode when the base is exactly 10.0f.
4573 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
4574                          SelectionDAG &DAG, const TargetLowering &TLI) {
4575   bool IsExp10 = false;
4576   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
4577       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4578     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
4579       APFloat Ten(10.0f);
4580       IsExp10 = LHSC->isExactlyValue(Ten);
4581     }
4582   }
4583 
4584   // TODO: What fast-math-flags should be set on the FMUL node?
4585   if (IsExp10) {
4586     // Put the exponent in the right bit position for later addition to the
4587     // final result:
4588     //
4589     //   #define LOG2OF10 3.3219281f
4590     //   t0 = Op * LOG2OF10;
4591     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
4592                              getF32Constant(DAG, 0x40549a78, dl));
4593     return getLimitedPrecisionExp2(t0, dl, DAG);
4594   }
4595 
4596   // No special expansion.
4597   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
4598 }
4599 
4600 
4601 /// ExpandPowI - Expand an llvm.powi intrinsic.
4602 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
4603                           SelectionDAG &DAG) {
4604   // If RHS is a constant, we can expand this out to a multiplication tree;
4605   // otherwise we end up lowering to a call to __powidf2 (for example). When
4606   // optimizing for size, we only expand if the expansion would produce a
4607   // small number of multiplies; when not optimizing for size, we always expand.
4608   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4609     // Get the exponent as a positive value.
4610     unsigned Val = RHSC->getSExtValue();
4611     if ((int)Val < 0) Val = -Val;
4612 
4613     // powi(x, 0) -> 1.0
4614     if (Val == 0)
4615       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
4616 
4617     const Function *F = DAG.getMachineFunction().getFunction();
4618     if (!F->optForSize() ||
4619         // If optimizing for size, don't insert too many multiplies.
4620         // This inserts up to 5 multiplies.
4621         countPopulation(Val) + Log2_32(Val) < 7) {
4622       // We use the simple binary decomposition method to generate the multiply
4623       // sequence.  There are more optimal ways to do this (for example,
4624       // powi(x,15) generates one more multiply than it should), but this has
4625       // the benefit of being both really simple and much better than a libcall.
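      // For example (illustrative): for Val == 13 (0b1101) the loop squares
      // CurSquare through x^2, x^4, x^8 and multiplies the powers for the set
      // bits into Res, giving x^13 = x * x^4 * x^8.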
4626       SDValue Res;  // Logically starts equal to 1.0
4627       SDValue CurSquare = LHS;
4628       // TODO: Intrinsics should have fast-math-flags that propagate to these
4629       // nodes.
4630       while (Val) {
4631         if (Val & 1) {
4632           if (Res.getNode())
4633             Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
4634           else
4635             Res = CurSquare;  // 1.0*CurSquare.
4636         }
4637 
4638         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
4639                                 CurSquare, CurSquare);
4640         Val >>= 1;
4641       }
4642 
4643       // If the original exponent was negative, invert the result, producing 1/(x*x*x).
4644       if (RHSC->getSExtValue() < 0)
4645         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
4646                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
4647       return Res;
4648     }
4649   }
4650 
4651   // Otherwise, expand to a libcall.
4652   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
4653 }
4654 
4655 // getUnderlyingArgReg - Find the underlying register used for a truncated,
4656 // bitcasted, or assert-extended argument.
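// For example (illustrative, with a made-up virtual register): for
// N = (trunc (CopyFromReg Chain, %vreg0)) this recurses through the truncate
// and returns %vreg0; for nodes not rooted at a CopyFromReg it returns 0.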
4657 static unsigned getUnderlyingArgReg(const SDValue &N) {
4658   switch (N.getOpcode()) {
4659   case ISD::CopyFromReg:
4660     return cast<RegisterSDNode>(N.getOperand(1))->getReg();
4661   case ISD::BITCAST:
4662   case ISD::AssertZext:
4663   case ISD::AssertSext:
4664   case ISD::TRUNCATE:
4665     return getUnderlyingArgReg(N.getOperand(0));
4666   default:
4667     return 0;
4668   }
4669 }
4670 
4671 /// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
4672 /// argument, create the corresponding DBG_VALUE machine instruction for it now.
4673 /// At the end of instruction selection, they will be inserted to the entry BB.
4674 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
4675     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
4676     DILocation *DL, int64_t Offset, bool IsDbgDeclare, const SDValue &N) {
4677   const Argument *Arg = dyn_cast<Argument>(V);
4678   if (!Arg)
4679     return false;
4680 
4681   MachineFunction &MF = DAG.getMachineFunction();
4682   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
4683 
4684   // Ignore inlined function arguments here.
4685   //
4686   // FIXME: Should we be checking DL->inlinedAt() to determine this?
4687   if (!Variable->getScope()->getSubprogram()->describes(MF.getFunction()))
4688     return false;
4689 
4690   bool IsIndirect = false;
4691   Optional<MachineOperand> Op;
4692   // Some arguments' frame indices are recorded during argument lowering.
4693   if (int FI = FuncInfo.getArgumentFrameIndex(Arg))
4694     Op = MachineOperand::CreateFI(FI);
4695 
4696   if (!Op && N.getNode()) {
4697     unsigned Reg = getUnderlyingArgReg(N);
4698     if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
4699       MachineRegisterInfo &RegInfo = MF.getRegInfo();
4700       unsigned PR = RegInfo.getLiveInPhysReg(Reg);
4701       if (PR)
4702         Reg = PR;
4703     }
4704     if (Reg) {
4705       Op = MachineOperand::CreateReg(Reg, false);
4706       IsIndirect = IsDbgDeclare;
4707     }
4708   }
4709 
4710   if (!Op) {
4711     // Check whether the ValueMap has a register number for this value.
4712     DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
4713     if (VMI != FuncInfo.ValueMap.end()) {
4714       Op = MachineOperand::CreateReg(VMI->second, false);
4715       IsIndirect = IsDbgDeclare;
4716     }
4717   }
4718 
4719   if (!Op && N.getNode())
4720     // Check whether a frame index is available.
4721     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
4722       if (FrameIndexSDNode *FINode =
4723           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
4724         Op = MachineOperand::CreateFI(FINode->getIndex());
4725 
4726   if (!Op)
4727     return false;
4728 
4729   assert(Variable->isValidLocationForIntrinsic(DL) &&
4730          "Expected inlined-at fields to agree");
4731   if (Op->isReg())
4732     FuncInfo.ArgDbgValues.push_back(
4733         BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
4734                 Op->getReg(), Offset, Variable, Expr));
4735   else
4736     FuncInfo.ArgDbgValues.push_back(
4737         BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE))
4738             .add(*Op)
4739             .addImm(Offset)
4740             .addMetadata(Variable)
4741             .addMetadata(Expr));
4742 
4743   return true;
4744 }
4745 
4746 /// Return the appropriate SDDbgValue based on N.
4747 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
4748                                              DILocalVariable *Variable,
4749                                              DIExpression *Expr, int64_t Offset,
4750                                              const DebugLoc &dl,
4751                                              unsigned DbgSDNodeOrder) {
4752   SDDbgValue *SDV;
4753   auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode());
4754   if (FISDN && Expr->startsWithDeref()) {
4755     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
4756     // stack slot locations as such instead of as indirectly addressed
4757     // locations.
4758     ArrayRef<uint64_t> TrailingElements(Expr->elements_begin() + 1,
4759                                         Expr->elements_end());
4760     DIExpression *DerefedDIExpr =
4761         DIExpression::get(*DAG.getContext(), TrailingElements);
4762     int FI = FISDN->getIndex();
4763     SDV = DAG.getFrameIndexDbgValue(Variable, DerefedDIExpr, FI, 0, dl,
4764                                     DbgSDNodeOrder);
4765   } else {
4766     SDV = DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(), false,
4767                           Offset, dl, DbgSDNodeOrder);
4768   }
4769   return SDV;
4770 }
4771 
4772 // VisualStudio defines setjmp as _setjmp
4773 #if defined(_MSC_VER) && defined(setjmp) && \
4774                          !defined(setjmp_undefined_for_msvc)
4775 #  pragma push_macro("setjmp")
4776 #  undef setjmp
4777 #  define setjmp_undefined_for_msvc
4778 #endif
4779 
4780 /// Lower the call to the specified intrinsic function. If we want to emit this
4781 /// as a call to a named external function, return the name. Otherwise, lower it
4782 /// and return null.
4783 const char *
4784 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
4785   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4786   SDLoc sdl = getCurSDLoc();
4787   DebugLoc dl = getCurDebugLoc();
4788   SDValue Res;
4789 
4790   switch (Intrinsic) {
4791   default:
4792     // By default, turn this into a target intrinsic node.
4793     visitTargetIntrinsic(I, Intrinsic);
4794     return nullptr;
4795   case Intrinsic::vastart:  visitVAStart(I); return nullptr;
4796   case Intrinsic::vaend:    visitVAEnd(I); return nullptr;
4797   case Intrinsic::vacopy:   visitVACopy(I); return nullptr;
4798   case Intrinsic::returnaddress:
4799     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
4800                              TLI.getPointerTy(DAG.getDataLayout()),
4801                              getValue(I.getArgOperand(0))));
4802     return nullptr;
4803   case Intrinsic::addressofreturnaddress:
4804     setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
4805                              TLI.getPointerTy(DAG.getDataLayout())));
4806     return nullptr;
4807   case Intrinsic::frameaddress:
4808     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
4809                              TLI.getPointerTy(DAG.getDataLayout()),
4810                              getValue(I.getArgOperand(0))));
4811     return nullptr;
4812   case Intrinsic::read_register: {
4813     Value *Reg = I.getArgOperand(0);
4814     SDValue Chain = getRoot();
4815     SDValue RegName =
4816         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
4817     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4818     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
4819       DAG.getVTList(VT, MVT::Other), Chain, RegName);
4820     setValue(&I, Res);
4821     DAG.setRoot(Res.getValue(1));
4822     return nullptr;
4823   }
4824   case Intrinsic::write_register: {
4825     Value *Reg = I.getArgOperand(0);
4826     Value *RegValue = I.getArgOperand(1);
4827     SDValue Chain = getRoot();
4828     SDValue RegName =
4829         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
4830     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
4831                             RegName, getValue(RegValue)));
4832     return nullptr;
4833   }
4834   case Intrinsic::setjmp:
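    // Indexing into the string literal skips the leading '_' when the target
    // does not use the underscore-prefixed form (likewise for longjmp below).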
4835     return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
4836   case Intrinsic::longjmp:
4837     return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
4838   case Intrinsic::memcpy: {
4839     SDValue Op1 = getValue(I.getArgOperand(0));
4840     SDValue Op2 = getValue(I.getArgOperand(1));
4841     SDValue Op3 = getValue(I.getArgOperand(2));
4842     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4843     if (!Align)
4844       Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment.
4845     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4846     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
4847     SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4848                                false, isTC,
4849                                MachinePointerInfo(I.getArgOperand(0)),
4850                                MachinePointerInfo(I.getArgOperand(1)));
4851     updateDAGForMaybeTailCall(MC);
4852     return nullptr;
4853   }
4854   case Intrinsic::memset: {
4855     SDValue Op1 = getValue(I.getArgOperand(0));
4856     SDValue Op2 = getValue(I.getArgOperand(1));
4857     SDValue Op3 = getValue(I.getArgOperand(2));
4858     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4859     if (!Align)
4860       Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment.
4861     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4862     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
4863     SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4864                                isTC, MachinePointerInfo(I.getArgOperand(0)));
4865     updateDAGForMaybeTailCall(MS);
4866     return nullptr;
4867   }
4868   case Intrinsic::memmove: {
4869     SDValue Op1 = getValue(I.getArgOperand(0));
4870     SDValue Op2 = getValue(I.getArgOperand(1));
4871     SDValue Op3 = getValue(I.getArgOperand(2));
4872     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4873     if (!Align)
4874       Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment.
4875     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4876     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
4877     SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4878                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
4879                                 MachinePointerInfo(I.getArgOperand(1)));
4880     updateDAGForMaybeTailCall(MM);
4881     return nullptr;
4882   }
4883   case Intrinsic::memcpy_element_atomic: {
4884     SDValue Dst = getValue(I.getArgOperand(0));
4885     SDValue Src = getValue(I.getArgOperand(1));
4886     SDValue NumElements = getValue(I.getArgOperand(2));
4887     SDValue ElementSize = getValue(I.getArgOperand(3));
4888 
4889     // Emit a library call.
4890     TargetLowering::ArgListTy Args;
4891     TargetLowering::ArgListEntry Entry;
4892     Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
4893     Entry.Node = Dst;
4894     Args.push_back(Entry);
4895 
4896     Entry.Node = Src;
4897     Args.push_back(Entry);
4898 
4899     Entry.Ty = I.getArgOperand(2)->getType();
4900     Entry.Node = NumElements;
4901     Args.push_back(Entry);
4902 
4903     Entry.Ty = Type::getInt32Ty(*DAG.getContext());
4904     Entry.Node = ElementSize;
4905     Args.push_back(Entry);
4906 
4907     uint64_t ElementSizeConstant =
4908         cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4909     RTLIB::Libcall LibraryCall =
4910         RTLIB::getMEMCPY_ELEMENT_ATOMIC(ElementSizeConstant);
4911     if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
4912       report_fatal_error("Unsupported element size");
4913 
4914     TargetLowering::CallLoweringInfo CLI(DAG);
4915     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
4916         TLI.getLibcallCallingConv(LibraryCall),
4917         Type::getVoidTy(*DAG.getContext()),
4918         DAG.getExternalSymbol(TLI.getLibcallName(LibraryCall),
4919                               TLI.getPointerTy(DAG.getDataLayout())),
4920         std::move(Args));
4921 
4922     std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
4923     DAG.setRoot(CallResult.second);
4924     return nullptr;
4925   }
4926   case Intrinsic::dbg_declare: {
4927     const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
4928     DILocalVariable *Variable = DI.getVariable();
4929     DIExpression *Expression = DI.getExpression();
4930     const Value *Address = DI.getAddress();
4931     assert(Variable && "Missing variable");
4932     if (!Address) {
4933       DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4934       return nullptr;
4935     }
4936 
4937     // Drop the debug info if the address is undef, or is unused and not an argument.
4938     if (isa<UndefValue>(Address) ||
4939         (Address->use_empty() && !isa<Argument>(Address))) {
4940       DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4941       return nullptr;
4942     }
4943 
4944     SDValue &N = NodeMap[Address];
4945     if (!N.getNode() && isa<Argument>(Address))
4946       // Check unused arguments map.
4947       N = UnusedArgNodeMap[Address];
4948     SDDbgValue *SDV;
4949     if (N.getNode()) {
4950       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
4951         Address = BCI->getOperand(0);
4952       // Parameters are handled specially.
4953       bool isParameter = Variable->isParameter() || isa<Argument>(Address);
4954       auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
4955       if (isParameter && FINode) {
4956         // Byval parameter. We have a frame index at this point.
4957         SDV = DAG.getFrameIndexDbgValue(Variable, Expression,
4958                                         FINode->getIndex(), 0, dl, SDNodeOrder);
4959       } else if (isa<Argument>(Address)) {
4960         // Address is an argument, so try to emit its dbg value using
4961         // virtual register info from the FuncInfo.ValueMap.
4962         EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, true, N);
4963         return nullptr;
4964       } else {
4965         SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
4966                               true, 0, dl, SDNodeOrder);
4967       }
4968       DAG.AddDbgValue(SDV, N.getNode(), isParameter);
4969     } else {
4970       // If Address is an argument then try to emit its dbg value using
4971       // virtual register info from the FuncInfo.ValueMap.
4972       if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, true,
4973                                     N)) {
4974         // If the variable is pinned by an alloca in a dominating basic
4975         // block, use the StaticAllocaMap.
4976         if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
4977           if (AI->getParent() != DI.getParent()) {
4978             DenseMap<const AllocaInst*, int>::iterator SI =
4979               FuncInfo.StaticAllocaMap.find(AI);
4980             if (SI != FuncInfo.StaticAllocaMap.end()) {
4981               SDV = DAG.getFrameIndexDbgValue(Variable, Expression, SI->second,
4982                                               0, dl, SDNodeOrder);
4983               DAG.AddDbgValue(SDV, nullptr, false);
4984               return nullptr;
4985             }
4986           }
4987         }
4988         DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4989       }
4990     }
4991     return nullptr;
4992   }
4993   case Intrinsic::dbg_value: {
4994     const DbgValueInst &DI = cast<DbgValueInst>(I);
4995     assert(DI.getVariable() && "Missing variable");
4996 
4997     DILocalVariable *Variable = DI.getVariable();
4998     DIExpression *Expression = DI.getExpression();
4999     uint64_t Offset = DI.getOffset();
5000     const Value *V = DI.getValue();
5001     if (!V)
5002       return nullptr;
5003 
5004     SDDbgValue *SDV;
5005     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
5006       SDV = DAG.getConstantDbgValue(Variable, Expression, V, Offset, dl,
5007                                     SDNodeOrder);
5008       DAG.AddDbgValue(SDV, nullptr, false);
5009     } else {
5010       // Do not use getValue() in here; we don't want to generate code at
5011       // this point if it hasn't been done yet.
5012       SDValue N = NodeMap[V];
5013       if (!N.getNode() && isa<Argument>(V))
5014         // Check unused arguments map.
5015         N = UnusedArgNodeMap[V];
5016       if (N.getNode()) {
5017         if (!EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset,
5018                                       false, N)) {
5019           SDV = getDbgValue(N, Variable, Expression, Offset, dl, SDNodeOrder);
5020           DAG.AddDbgValue(SDV, N.getNode(), false);
5021         }
5022       } else if (!V->use_empty()) {
5023         // Do not call getValue(V) yet, as we don't want to generate code.
5024         // Remember it for later.
5025         DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
5026         DanglingDebugInfoMap[V] = DDI;
5027       } else {
5028         // We may expand this to cover more cases.  One case where we have no
5029         // data available is an unreferenced parameter.
5030         DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5031       }
5032     }
5033 
5034     // Build a debug info table entry.
5035     if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
5036       V = BCI->getOperand(0);
5037     const AllocaInst *AI = dyn_cast<AllocaInst>(V);
5038     // Don't handle byval struct arguments or VLAs, for example.
5039     if (!AI) {
5040       DEBUG(dbgs() << "Dropping debug location info for:\n  " << DI << "\n");
5041       DEBUG(dbgs() << "  Last seen at:\n    " << *V << "\n");
5042       return nullptr;
5043     }
5044     DenseMap<const AllocaInst*, int>::iterator SI =
5045       FuncInfo.StaticAllocaMap.find(AI);
5046     if (SI == FuncInfo.StaticAllocaMap.end())
5047       return nullptr; // VLAs.
5048     return nullptr;
5049   }
5050 
5051   case Intrinsic::eh_typeid_for: {
5052     // Find the type id for the given typeinfo.
5053     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5054     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5055     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5056     setValue(&I, Res);
5057     return nullptr;
5058   }
5059 
5060   case Intrinsic::eh_return_i32:
5061   case Intrinsic::eh_return_i64:
5062     DAG.getMachineFunction().setCallsEHReturn(true);
5063     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5064                             MVT::Other,
5065                             getControlRoot(),
5066                             getValue(I.getArgOperand(0)),
5067                             getValue(I.getArgOperand(1))));
5068     return nullptr;
5069   case Intrinsic::eh_unwind_init:
5070     DAG.getMachineFunction().setCallsUnwindInit(true);
5071     return nullptr;
5072   case Intrinsic::eh_dwarf_cfa: {
5073     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
5074                              TLI.getPointerTy(DAG.getDataLayout()),
5075                              getValue(I.getArgOperand(0))));
5076     return nullptr;
5077   }
5078   case Intrinsic::eh_sjlj_callsite: {
5079     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5080     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
5081     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
5082     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
5083 
5084     MMI.setCurrentCallSite(CI->getZExtValue());
5085     return nullptr;
5086   }
5087   case Intrinsic::eh_sjlj_functioncontext: {
5088     // Get and store the index of the function context.
5089     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
5090     AllocaInst *FnCtx =
5091       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
5092     int FI = FuncInfo.StaticAllocaMap[FnCtx];
5093     MFI.setFunctionContextIndex(FI);
5094     return nullptr;
5095   }
5096   case Intrinsic::eh_sjlj_setjmp: {
5097     SDValue Ops[2];
5098     Ops[0] = getRoot();
5099     Ops[1] = getValue(I.getArgOperand(0));
5100     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
5101                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
5102     setValue(&I, Op.getValue(0));
5103     DAG.setRoot(Op.getValue(1));
5104     return nullptr;
5105   }
5106   case Intrinsic::eh_sjlj_longjmp: {
5107     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
5108                             getRoot(), getValue(I.getArgOperand(0))));
5109     return nullptr;
5110   }
5111   case Intrinsic::eh_sjlj_setup_dispatch: {
5112     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
5113                             getRoot()));
5114     return nullptr;
5115   }
5116 
5117   case Intrinsic::masked_gather:
5118     visitMaskedGather(I);
5119     return nullptr;
5120   case Intrinsic::masked_load:
5121     visitMaskedLoad(I);
5122     return nullptr;
5123   case Intrinsic::masked_scatter:
5124     visitMaskedScatter(I);
5125     return nullptr;
5126   case Intrinsic::masked_store:
5127     visitMaskedStore(I);
5128     return nullptr;
5129   case Intrinsic::masked_expandload:
5130     visitMaskedLoad(I, true /* IsExpanding */);
5131     return nullptr;
5132   case Intrinsic::masked_compressstore:
5133     visitMaskedStore(I, true /* IsCompressing */);
5134     return nullptr;
5135   case Intrinsic::x86_mmx_pslli_w:
5136   case Intrinsic::x86_mmx_pslli_d:
5137   case Intrinsic::x86_mmx_pslli_q:
5138   case Intrinsic::x86_mmx_psrli_w:
5139   case Intrinsic::x86_mmx_psrli_d:
5140   case Intrinsic::x86_mmx_psrli_q:
5141   case Intrinsic::x86_mmx_psrai_w:
5142   case Intrinsic::x86_mmx_psrai_d: {
5143     SDValue ShAmt = getValue(I.getArgOperand(1));
5144     if (isa<ConstantSDNode>(ShAmt)) {
5145       visitTargetIntrinsic(I, Intrinsic);
5146       return nullptr;
5147     }
5148     unsigned NewIntrinsic = 0;
5149     EVT ShAmtVT = MVT::v2i32;
5150     switch (Intrinsic) {
5151     case Intrinsic::x86_mmx_pslli_w:
5152       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
5153       break;
5154     case Intrinsic::x86_mmx_pslli_d:
5155       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
5156       break;
5157     case Intrinsic::x86_mmx_pslli_q:
5158       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
5159       break;
5160     case Intrinsic::x86_mmx_psrli_w:
5161       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
5162       break;
5163     case Intrinsic::x86_mmx_psrli_d:
5164       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
5165       break;
5166     case Intrinsic::x86_mmx_psrli_q:
5167       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
5168       break;
5169     case Intrinsic::x86_mmx_psrai_w:
5170       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
5171       break;
5172     case Intrinsic::x86_mmx_psrai_d:
5173       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
5174       break;
5175     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5176     }
5177 
5178     // The vector shift intrinsics with scalar shift amounts use 32-bit
5179     // values, but the SSE2/MMX shift instructions read 64 bits. Set the
5180     // upper 32 bits to zero.
5181     // We must do this early because v2i32 is not a legal type.
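         // For example, a pslli.d with i32 shift amount %a becomes a psll.d
         // whose shift operand is the vector <2 x i32> <%a, 0>, bitcast to the
         // MMX result type.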
5182     SDValue ShOps[2];
5183     ShOps[0] = ShAmt;
5184     ShOps[1] = DAG.getConstant(0, sdl, MVT::i32);
5185     ShAmt = DAG.getBuildVector(ShAmtVT, sdl, ShOps);
5186     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5187     ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5188     Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5189                        DAG.getConstant(NewIntrinsic, sdl, MVT::i32),
5190                        getValue(I.getArgOperand(0)), ShAmt);
5191     setValue(&I, Res);
5192     return nullptr;
5193   }
5194   case Intrinsic::powi:
5195     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
5196                             getValue(I.getArgOperand(1)), DAG));
5197     return nullptr;
5198   case Intrinsic::log:
5199     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5200     return nullptr;
5201   case Intrinsic::log2:
5202     setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5203     return nullptr;
5204   case Intrinsic::log10:
5205     setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5206     return nullptr;
5207   case Intrinsic::exp:
5208     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5209     return nullptr;
5210   case Intrinsic::exp2:
5211     setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5212     return nullptr;
5213   case Intrinsic::pow:
5214     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
5215                            getValue(I.getArgOperand(1)), DAG, TLI));
5216     return nullptr;
5217   case Intrinsic::sqrt:
5218   case Intrinsic::fabs:
5219   case Intrinsic::sin:
5220   case Intrinsic::cos:
5221   case Intrinsic::floor:
5222   case Intrinsic::ceil:
5223   case Intrinsic::trunc:
5224   case Intrinsic::rint:
5225   case Intrinsic::nearbyint:
5226   case Intrinsic::round:
5227   case Intrinsic::canonicalize: {
5228     unsigned Opcode;
5229     switch (Intrinsic) {
5230     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5231     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
5232     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
5233     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
5234     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
5235     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
5236     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
5237     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
5238     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
5239     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
5240     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
5241     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
5242     }
5243 
5244     setValue(&I, DAG.getNode(Opcode, sdl,
5245                              getValue(I.getArgOperand(0)).getValueType(),
5246                              getValue(I.getArgOperand(0))));
5247     return nullptr;
5248   }
5249   case Intrinsic::minnum: {
5250     auto VT = getValue(I.getArgOperand(0)).getValueType();
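         // Prefer the FMINNAN node when the call has no-NaNs and the node is
         // legal for this type; otherwise fall back to FMINNUM, which has
         // IEEE-754 minNum semantics.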
5251     unsigned Opc =
5252         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT)
5253             ? ISD::FMINNAN
5254             : ISD::FMINNUM;
5255     setValue(&I, DAG.getNode(Opc, sdl, VT,
5256                              getValue(I.getArgOperand(0)),
5257                              getValue(I.getArgOperand(1))));
5258     return nullptr;
5259   }
5260   case Intrinsic::maxnum: {
5261     auto VT = getValue(I.getArgOperand(0)).getValueType();
5262     unsigned Opc =
5263         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT)
5264             ? ISD::FMAXNAN
5265             : ISD::FMAXNUM;
5266     setValue(&I, DAG.getNode(Opc, sdl, VT,
5267                              getValue(I.getArgOperand(0)),
5268                              getValue(I.getArgOperand(1))));
5269     return nullptr;
5270   }
5271   case Intrinsic::copysign:
5272     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
5273                              getValue(I.getArgOperand(0)).getValueType(),
5274                              getValue(I.getArgOperand(0)),
5275                              getValue(I.getArgOperand(1))));
5276     return nullptr;
5277   case Intrinsic::fma:
5278     setValue(&I, DAG.getNode(ISD::FMA, sdl,
5279                              getValue(I.getArgOperand(0)).getValueType(),
5280                              getValue(I.getArgOperand(0)),
5281                              getValue(I.getArgOperand(1)),
5282                              getValue(I.getArgOperand(2))));
5283     return nullptr;
5284   case Intrinsic::experimental_constrained_fadd:
5285   case Intrinsic::experimental_constrained_fsub:
5286   case Intrinsic::experimental_constrained_fmul:
5287   case Intrinsic::experimental_constrained_fdiv:
5288   case Intrinsic::experimental_constrained_frem:
5289     visitConstrainedFPIntrinsic(I, Intrinsic);
5290     return nullptr;
5291   case Intrinsic::fmuladd: {
5292     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
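         // Emit a single fused multiply-add when fusion is permitted and the
         // target reports FMA as faster; otherwise lower to separate FMUL and
         // FADD nodes.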
5293     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
5294         TLI.isFMAFasterThanFMulAndFAdd(VT)) {
5295       setValue(&I, DAG.getNode(ISD::FMA, sdl,
5296                                getValue(I.getArgOperand(0)).getValueType(),
5297                                getValue(I.getArgOperand(0)),
5298                                getValue(I.getArgOperand(1)),
5299                                getValue(I.getArgOperand(2))));
5300     } else {
5301       // TODO: Intrinsic calls should have fast-math-flags.
5302       SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
5303                                 getValue(I.getArgOperand(0)).getValueType(),
5304                                 getValue(I.getArgOperand(0)),
5305                                 getValue(I.getArgOperand(1)));
5306       SDValue Add = DAG.getNode(ISD::FADD, sdl,
5307                                 getValue(I.getArgOperand(0)).getValueType(),
5308                                 Mul,
5309                                 getValue(I.getArgOperand(2)));
5310       setValue(&I, Add);
5311     }
5312     return nullptr;
5313   }
5314   case Intrinsic::convert_to_fp16:
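         // Round the source to f16, then reinterpret the bits as the i16
         // result type the intrinsic returns.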
5315     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
5316                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
5317                                          getValue(I.getArgOperand(0)),
5318                                          DAG.getTargetConstant(0, sdl,
5319                                                                MVT::i32))));
5320     return nullptr;
5321   case Intrinsic::convert_from_fp16:
5322     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
5323                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
5324                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
5325                                          getValue(I.getArgOperand(0)))));
5326     return nullptr;
5327   case Intrinsic::pcmarker: {
5328     SDValue Tmp = getValue(I.getArgOperand(0));
5329     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
5330     return nullptr;
5331   }
5332   case Intrinsic::readcyclecounter: {
5333     SDValue Op = getRoot();
5334     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
5335                       DAG.getVTList(MVT::i64, MVT::Other), Op);
5336     setValue(&I, Res);
5337     DAG.setRoot(Res.getValue(1));
5338     return nullptr;
5339   }
5340   case Intrinsic::bitreverse:
5341     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
5342                              getValue(I.getArgOperand(0)).getValueType(),
5343                              getValue(I.getArgOperand(0))));
5344     return nullptr;
5345   case Intrinsic::bswap:
5346     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
5347                              getValue(I.getArgOperand(0)).getValueType(),
5348                              getValue(I.getArgOperand(0))));
5349     return nullptr;
5350   case Intrinsic::cttz: {
5351     SDValue Arg = getValue(I.getArgOperand(0));
5352     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
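         // The second operand is the is_zero_undef flag: when it is nonzero,
         // the result is undefined for a zero input, so the cheaper
         // CTTZ_ZERO_UNDEF node can be used.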
5353     EVT Ty = Arg.getValueType();
5354     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
5355                              sdl, Ty, Arg));
5356     return nullptr;
5357   }
5358   case Intrinsic::ctlz: {
5359     SDValue Arg = getValue(I.getArgOperand(0));
5360     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5361     EVT Ty = Arg.getValueType();
5362     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
5363                              sdl, Ty, Arg));
5364     return nullptr;
5365   }
5366   case Intrinsic::ctpop: {
5367     SDValue Arg = getValue(I.getArgOperand(0));
5368     EVT Ty = Arg.getValueType();
5369     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
5370     return nullptr;
5371   }
5372   case Intrinsic::stacksave: {
5373     SDValue Op = getRoot();
5374     Res = DAG.getNode(
5375         ISD::STACKSAVE, sdl,
5376         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
5377     setValue(&I, Res);
5378     DAG.setRoot(Res.getValue(1));
5379     return nullptr;
5380   }
5381   case Intrinsic::stackrestore: {
5382     Res = getValue(I.getArgOperand(0));
5383     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
5384     return nullptr;
5385   }
5386   case Intrinsic::get_dynamic_area_offset: {
5387     SDValue Op = getRoot();
5388     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5389     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
5390     // The result type of @llvm.get.dynamic.area.offset must match the
5391     // target's pointer type.
5392     if (PtrTy != ResTy)
5393       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
5394                          " intrinsic!");
5395     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
5396                       Op);
5397     DAG.setRoot(Op);
5398     setValue(&I, Res);
5399     return nullptr;
5400   }
5401   case Intrinsic::stackguard: {
5402     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5403     MachineFunction &MF = DAG.getMachineFunction();
5404     const Module &M = *MF.getFunction()->getParent();
5405     SDValue Chain = getRoot();
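         // Either use the target's LOAD_STACK_GUARD pseudo-instruction or
         // emit a volatile load of the stack-guard global variable.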
5406     if (TLI.useLoadStackGuardNode()) {
5407       Res = getLoadStackGuard(DAG, sdl, Chain);
5408     } else {
5409       const Value *Global = TLI.getSDagStackGuard(M);
5410       unsigned Align = DL->getPrefTypeAlignment(Global->getType());
5411       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
5412                         MachinePointerInfo(Global, 0), Align,
5413                         MachineMemOperand::MOVolatile);
5414     }
5415     DAG.setRoot(Chain);
5416     setValue(&I, Res);
5417     return nullptr;
5418   }
5419   case Intrinsic::stackprotector: {
5420     // Emit code into the DAG to store the stack guard onto the stack.
5421     MachineFunction &MF = DAG.getMachineFunction();
5422     MachineFrameInfo &MFI = MF.getFrameInfo();
5423     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5424     SDValue Src, Chain = getRoot();
5425 
5426     if (TLI.useLoadStackGuardNode())
5427       Src = getLoadStackGuard(DAG, sdl, Chain);
5428     else
5429       Src = getValue(I.getArgOperand(0));   // The guard's value.
5430 
5431     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
5432 
5433     int FI = FuncInfo.StaticAllocaMap[Slot];
5434     MFI.setStackProtectorIndex(FI);
5435 
5436     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
5437 
5438     // Store the stack protector onto the stack.
5439     Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
5440                                                  DAG.getMachineFunction(), FI),
5441                        /* Alignment = */ 0, MachineMemOperand::MOVolatile);
5442     setValue(&I, Res);
5443     DAG.setRoot(Res);
5444     return nullptr;
5445   }
5446   case Intrinsic::objectsize: {
5447     // If we don't know by now, we're never going to know.
5448     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
5449 
5450     assert(CI && "Non-constant type in __builtin_object_size?");
5451 
5452     SDValue Arg = getValue(I.getCalledValue());
5453     EVT Ty = Arg.getValueType();
5454 
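         // The second argument is the "min" flag: report -1 (unknown maximum
         // size) when it is false, and 0 (unknown minimum size) when true.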
5455     if (CI->isZero())
5456       Res = DAG.getConstant(-1ULL, sdl, Ty);
5457     else
5458       Res = DAG.getConstant(0, sdl, Ty);
5459 
5460     setValue(&I, Res);
5461     return nullptr;
5462   }
5463   case Intrinsic::annotation:
5464   case Intrinsic::ptr_annotation:
5465   case Intrinsic::invariant_group_barrier:
5466     // Drop the intrinsic, but forward the value
5467     setValue(&I, getValue(I.getOperand(0)));
5468     return nullptr;
5469   case Intrinsic::assume:
5470   case Intrinsic::var_annotation:
5471     // Discard annotate attributes and assumptions
5472     return nullptr;
5473 
5474   case Intrinsic::init_trampoline: {
5475     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
5476 
5477     SDValue Ops[6];
5478     Ops[0] = getRoot();
5479     Ops[1] = getValue(I.getArgOperand(0));
5480     Ops[2] = getValue(I.getArgOperand(1));
5481     Ops[3] = getValue(I.getArgOperand(2));
5482     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
5483     Ops[5] = DAG.getSrcValue(F);
5484 
5485     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
5486 
5487     DAG.setRoot(Res);
5488     return nullptr;
5489   }
5490   case Intrinsic::adjust_trampoline: {
5491     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
5492                              TLI.getPointerTy(DAG.getDataLayout()),
5493                              getValue(I.getArgOperand(0))));
5494     return nullptr;
5495   }
5496   case Intrinsic::gcroot: {
5497     MachineFunction &MF = DAG.getMachineFunction();
5498     const Function *F = MF.getFunction();
5499     (void)F;
5500     assert(F->hasGC() &&
5501            "only valid in functions with gc specified, enforced by Verifier");
5502     assert(GFI && "implied by previous");
5503     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
5504     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
5505 
5506     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
5507     GFI->addStackRoot(FI->getIndex(), TypeMap);
5508     return nullptr;
5509   }
5510   case Intrinsic::gcread:
5511   case Intrinsic::gcwrite:
5512     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
5513   case Intrinsic::flt_rounds:
5514     setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
5515     return nullptr;
5516 
5517   case Intrinsic::expect: {
5518     // Just replace __builtin_expect(exp, c) with EXP.
5519     setValue(&I, getValue(I.getArgOperand(0)));
5520     return nullptr;
5521   }
5522 
5523   case Intrinsic::debugtrap:
5524   case Intrinsic::trap: {
5525     StringRef TrapFuncName =
5526         I.getAttributes()
5527             .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
5528             .getValueAsString();
5529     if (TrapFuncName.empty()) {
5530       ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
5531         ISD::TRAP : ISD::DEBUGTRAP;
5532       DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
5533       return nullptr;
5534     }
5535     TargetLowering::ArgListTy Args;
5536 
5537     TargetLowering::CallLoweringInfo CLI(DAG);
5538     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
5539         CallingConv::C, I.getType(),
5540         DAG.getExternalSymbol(TrapFuncName.data(),
5541                               TLI.getPointerTy(DAG.getDataLayout())),
5542         std::move(Args));
5543 
5544     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
5545     DAG.setRoot(Result.second);
5546     return nullptr;
5547   }
5548 
5549   case Intrinsic::uadd_with_overflow:
5550   case Intrinsic::sadd_with_overflow:
5551   case Intrinsic::usub_with_overflow:
5552   case Intrinsic::ssub_with_overflow:
5553   case Intrinsic::umul_with_overflow:
5554   case Intrinsic::smul_with_overflow: {
5555     ISD::NodeType Op;
5556     switch (Intrinsic) {
5557     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5558     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
5559     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
5560     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
5561     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
5562     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
5563     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
5564     }
5565     SDValue Op1 = getValue(I.getArgOperand(0));
5566     SDValue Op2 = getValue(I.getArgOperand(1));
5567 
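         // Each *.with.overflow intrinsic produces two results: the
         // arithmetic value and an i1 overflow flag.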
5568     SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
5569     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
5570     return nullptr;
5571   }
5572   case Intrinsic::prefetch: {
5573     SDValue Ops[5];
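         // @llvm.prefetch takes (address, rw, locality, cache type); rw
         // selects a read (0) or write (1) prefetch.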
5574     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
5575     Ops[0] = getRoot();
5576     Ops[1] = getValue(I.getArgOperand(0));
5577     Ops[2] = getValue(I.getArgOperand(1));
5578     Ops[3] = getValue(I.getArgOperand(2));
5579     Ops[4] = getValue(I.getArgOperand(3));
5580     DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
5581                                         DAG.getVTList(MVT::Other), Ops,
5582                                         EVT::getIntegerVT(*Context, 8),
5583                                         MachinePointerInfo(I.getArgOperand(0)),
5584                                         0, /* align */
5585                                         false, /* volatile */
5586                                         rw==0, /* read */
5587                                         rw==1)); /* write */
5588     return nullptr;
5589   }
5590   case Intrinsic::lifetime_start:
5591   case Intrinsic::lifetime_end: {
5592     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
5593     // Stack coloring is not enabled at -O0, so discard the region information.
5594     if (TM.getOptLevel() == CodeGenOpt::None)
5595       return nullptr;
5596 
5597     SmallVector<Value *, 4> Allocas;
5598     GetUnderlyingObjects(I.getArgOperand(1), Allocas, *DL);
5599 
5600     for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
5601            E = Allocas.end(); Object != E; ++Object) {
5602       AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
5603 
5604       // Could not find an Alloca.
5605       if (!LifetimeObject)
5606         continue;
5607 
5608       // First check that the Alloca is static, otherwise it won't have a
5609       // valid frame index.
5610       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
5611       if (SI == FuncInfo.StaticAllocaMap.end())
5612         return nullptr;
5613 
5614       int FI = SI->second;
5615 
5616       SDValue Ops[2];
5617       Ops[0] = getRoot();
5618       Ops[1] =
5619           DAG.getFrameIndex(FI, TLI.getFrameIndexTy(DAG.getDataLayout()), true);
5620       unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
5621 
5622       Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
5623       DAG.setRoot(Res);
5624     }
5625     return nullptr;
5626   }
5627   case Intrinsic::invariant_start:
5628     // Discard region information.
5629     setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
5630     return nullptr;
5631   case Intrinsic::invariant_end:
5632     // Discard region information.
5633     return nullptr;
5634   case Intrinsic::clear_cache:
5635     return TLI.getClearCacheBuiltinName();
5636   case Intrinsic::donothing:
5637     // ignore
5638     return nullptr;
5639   case Intrinsic::experimental_stackmap: {
5640     visitStackmap(I);
5641     return nullptr;
5642   }
5643   case Intrinsic::experimental_patchpoint_void:
5644   case Intrinsic::experimental_patchpoint_i64: {
5645     visitPatchpoint(&I);
5646     return nullptr;
5647   }
5648   case Intrinsic::experimental_gc_statepoint: {
5649     LowerStatepoint(ImmutableStatepoint(&I));
5650     return nullptr;
5651   }
5652   case Intrinsic::experimental_gc_result: {
5653     visitGCResult(cast<GCResultInst>(I));
5654     return nullptr;
5655   }
5656   case Intrinsic::experimental_gc_relocate: {
5657     visitGCRelocate(cast<GCRelocateInst>(I));
5658     return nullptr;
5659   }
5660   case Intrinsic::instrprof_increment:
5661     llvm_unreachable("instrprof failed to lower an increment");
5662   case Intrinsic::instrprof_value_profile:
5663     llvm_unreachable("instrprof failed to lower a value profiling call");
5664   case Intrinsic::localescape: {
5665     MachineFunction &MF = DAG.getMachineFunction();
5666     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5667 
5668     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
5669     // is the same on all targets.
5670     for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
5671       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
5672       if (isa<ConstantPointerNull>(Arg))
5673         continue; // Skip null pointers. They represent a hole in index space.
5674       AllocaInst *Slot = cast<AllocaInst>(Arg);
5675       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
5676              "can only escape static allocas");
5677       int FI = FuncInfo.StaticAllocaMap[Slot];
5678       MCSymbol *FrameAllocSym =
5679           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
5680               GlobalValue::getRealLinkageName(MF.getName()), Idx);
5681       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
5682               TII->get(TargetOpcode::LOCAL_ESCAPE))
5683           .addSym(FrameAllocSym)
5684           .addFrameIndex(FI);
5685     }
5686 
5687     return nullptr;
5688   }
5689 
5690   case Intrinsic::localrecover: {
5691     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
5692     MachineFunction &MF = DAG.getMachineFunction();
5693     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0);
5694 
5695     // Get the symbol that defines the frame offset.
5696     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
5697     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
5698     unsigned IdxVal = unsigned(Idx->getLimitedValue(INT_MAX));
5699     MCSymbol *FrameAllocSym =
5700         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
5701             GlobalValue::getRealLinkageName(Fn->getName()), IdxVal);
5702 
5703     // Create an MCSymbol for the label to avoid any target lowering
5704     // that would make this PC-relative.
5705     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
5706     SDValue OffsetVal =
5707         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
5708 
5709     // Add the offset to the FP.
5710     Value *FP = I.getArgOperand(1);
5711     SDValue FPVal = getValue(FP);
5712     SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal);
5713     setValue(&I, Add);
5714 
5715     return nullptr;
5716   }
5717 
5718   case Intrinsic::eh_exceptionpointer:
5719   case Intrinsic::eh_exceptioncode: {
5720     // Get the exception pointer vreg, copy from it, and resize it to fit.
5721     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
5722     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
5723     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
5724     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
5725     SDValue N =
5726         DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
5727     if (Intrinsic == Intrinsic::eh_exceptioncode)
5728       N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
5729     setValue(&I, N);
5730     return nullptr;
5731   }
5732 
5733   case Intrinsic::experimental_deoptimize:
5734     LowerDeoptimizeCall(&I);
5735     return nullptr;
5736   }
5737 }
5738 
5739 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(const CallInst &I,
5740                                                       unsigned Intrinsic) {
5741   SDLoc sdl = getCurSDLoc();
5742   unsigned Opcode;
5743   switch (Intrinsic) {
5744   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5745   case Intrinsic::experimental_constrained_fadd:
5746     Opcode = ISD::STRICT_FADD;
5747     break;
5748   case Intrinsic::experimental_constrained_fsub:
5749     Opcode = ISD::STRICT_FSUB;
5750     break;
5751   case Intrinsic::experimental_constrained_fmul:
5752     Opcode = ISD::STRICT_FMUL;
5753     break;
5754   case Intrinsic::experimental_constrained_fdiv:
5755     Opcode = ISD::STRICT_FDIV;
5756     break;
5757   case Intrinsic::experimental_constrained_frem:
5758     Opcode = ISD::STRICT_FREM;
5759     break;
5760   }
5761   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
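       // STRICT_ nodes take and produce a chain so that constrained FP
       // operations are not reordered relative to other side-effecting nodes.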
5762   SDValue Chain = getRoot();
5763   SDValue Ops[3] = { Chain, getValue(I.getArgOperand(0)),
5764                      getValue(I.getArgOperand(1)) };
5765   SmallVector<EVT, 4> ValueVTs;
5766   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
5767   ValueVTs.push_back(MVT::Other); // Out chain
5768 
5769   SDVTList VTs = DAG.getVTList(ValueVTs);
5770   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Ops);
5771 
5772   assert(Result.getNode()->getNumValues() == 2);
5773   SDValue OutChain = Result.getValue(1);
5774   DAG.setRoot(OutChain);
5775   SDValue FPResult = Result.getValue(0);
5776   setValue(&I, FPResult);
5777 }
5778 
5779 std::pair<SDValue, SDValue>
5780 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
5781                                     const BasicBlock *EHPadBB) {
5782   MachineFunction &MF = DAG.getMachineFunction();
5783   MachineModuleInfo &MMI = MF.getMMI();
5784   MCSymbol *BeginLabel = nullptr;
5785 
5786   if (EHPadBB) {
5787     // Insert a label before the invoke call to mark the try range.  This can be
5788     // used to detect deletion of the invoke via the MachineModuleInfo.
5789     BeginLabel = MMI.getContext().createTempSymbol();
5790 
5791     // For SjLj, keep track of which landing pads go with which invokes
5792     // so as to maintain the ordering of pads in the LSDA.
5793     unsigned CallSiteIndex = MMI.getCurrentCallSite();
5794     if (CallSiteIndex) {
5795       MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
5796       LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
5797 
5798       // Now that the call site is handled, stop tracking it.
5799       MMI.setCurrentCallSite(0);
5800     }
5801 
5802     // Both PendingLoads and PendingExports must be flushed here;
5803     // this call might not return.
5804     (void)getRoot();
5805     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
5806 
5807     CLI.setChain(getRoot());
5808   }
5809   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5810   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
5811 
5812   assert((CLI.IsTailCall || Result.second.getNode()) &&
5813          "Non-null chain expected with non-tail call!");
5814   assert((Result.second.getNode() || !Result.first.getNode()) &&
5815          "Null value expected with tail call!");
5816 
5817   if (!Result.second.getNode()) {
5818     // As a special case, a null chain means that a tail call has been emitted
5819     // and the DAG root is already updated.
5820     HasTailCall = true;
5821 
5822     // Since there's no actual continuation from this block, nothing can be
5823     // relying on us setting vregs for them.
5824     PendingExports.clear();
5825   } else {
5826     DAG.setRoot(Result.second);
5827   }
5828 
5829   if (EHPadBB) {
5830     // Insert a label at the end of the invoke call to mark the try range.  This
5831     // can be used to detect deletion of the invoke via the MachineModuleInfo.
5832     MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
5833     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
5834 
5835     // Inform MachineModuleInfo of range.
5836     if (MF.hasEHFunclets()) {
5837       assert(CLI.CS);
5838       WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
5839       EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS->getInstruction()),
5840                                 BeginLabel, EndLabel);
5841     } else {
5842       MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
5843     }
5844   }
5845 
5846   return Result;
5847 }
5848 
5849 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
5850                                       bool isTailCall,
5851                                       const BasicBlock *EHPadBB) {
5852   auto &DL = DAG.getDataLayout();
5853   FunctionType *FTy = CS.getFunctionType();
5854   Type *RetTy = CS.getType();
5855 
5856   TargetLowering::ArgListTy Args;
5857   Args.reserve(CS.arg_size());
5858 
5859   const Value *SwiftErrorVal = nullptr;
5860   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5861 
5862   // We can't tail call inside a function with a swifterror argument. Lowering
5863   // does not support this yet; the value would have to be moved into the
5864   // swifterror register before the call.
5865   auto *Caller = CS.getInstruction()->getParent()->getParent();
5866   if (TLI.supportSwiftError() &&
5867       Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
5868     isTailCall = false;
5869 
5870   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
5871        i != e; ++i) {
5872     TargetLowering::ArgListEntry Entry;
5873     const Value *V = *i;
5874 
5875     // Skip empty types
5876     if (V->getType()->isEmptyTy())
5877       continue;
5878 
5879     SDValue ArgNode = getValue(V);
5880     Entry.Node = ArgNode; Entry.Ty = V->getType();
5881 
5882     Entry.setAttributes(&CS, i - CS.arg_begin());
5883 
5884     // Use swifterror virtual register as input to the call.
5885     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
5886       SwiftErrorVal = V;
5887       // Find the virtual register for the actual swifterror argument and
5888       // use it, rather than the Value, as the call operand.
5889       Entry.Node =
5890           DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVReg(FuncInfo.MBB, V),
5891                           EVT(TLI.getPointerTy(DL)));
5892     }
5893 
5894     Args.push_back(Entry);
5895 
5896     // If we have an explicit sret argument that is an Instruction, (i.e., it
5897     // might point to function-local memory), we can't meaningfully tail-call.
5898     if (Entry.IsSRet && isa<Instruction>(V))
5899       isTailCall = false;
5900   }
5901 
5902   // Check if target-independent constraints permit a tail call here.
5903   // Target-dependent constraints are checked within TLI->LowerCallTo.
5904   if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
5905     isTailCall = false;
5906 
5907   // Disable tail calls if there is a swifterror argument. Targets have not
5908   // been updated to support tail calls with swifterror yet.
5909   if (TLI.supportSwiftError() && SwiftErrorVal)
5910     isTailCall = false;
5911 
5912   TargetLowering::CallLoweringInfo CLI(DAG);
5913   CLI.setDebugLoc(getCurSDLoc())
5914       .setChain(getRoot())
5915       .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
5916       .setTailCall(isTailCall)
5917       .setConvergent(CS.isConvergent());
5918   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
5919 
5920   if (Result.first.getNode()) {
5921     const Instruction *Inst = CS.getInstruction();
5922     Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
5923     setValue(Inst, Result.first);
5924   }
5925 
5926   // The last element of CLI.InVals has the SDValue for swifterror return.
5927   // Here we copy it to a virtual register and update SwiftErrorMap for
5928   // book-keeping.
5929   if (SwiftErrorVal && TLI.supportSwiftError()) {
5930     // Get the last element of InVals.
5931     SDValue Src = CLI.InVals.back();
5932     const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
5933     unsigned VReg = FuncInfo.MF->getRegInfo().createVirtualRegister(RC);
5934     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
5935     // We update the virtual register for the actual swifterror argument.
5936     FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, SwiftErrorVal, VReg);
5937     DAG.setRoot(CopyNode);
5938   }
5939 }
5940 
5941 /// Return true if it only matters that the value is equal or not-equal to zero.
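     /// For example, this holds when every use of the value has the form
     /// (icmp eq/ne X, 0).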
5942 static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
5943   for (const User *U : V->users()) {
5944     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
5945       if (IC->isEquality())
5946         if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
5947           if (C->isNullValue())
5948             continue;
5949     // Unknown instruction.
5950     return false;
5951   }
5952   return true;
5953 }
5954 
5955 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
5956                              SelectionDAGBuilder &Builder) {
5957 
5958   // Check to see if this load can be trivially constant folded, e.g. if the
5959   // input is from a string literal.
5960   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
5961     // Cast pointer to the type we really want to load.
5962     Type *LoadTy =
5963         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
5964     if (LoadVT.isVector())
5965       LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements());
5966 
5967     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
5968                                          PointerType::getUnqual(LoadTy));
5969 
5970     if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
5971             const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
5972       return Builder.getValue(LoadCst);
5973   }
5974 
5975   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
5976   // still constant memory, the input chain can be the entry node.
5977   SDValue Root;
5978   bool ConstantMemory = false;
5979 
5980   // Do not serialize (non-volatile) loads of constant memory with anything.
5981   if (Builder.AA->pointsToConstantMemory(PtrVal)) {
5982     Root = Builder.DAG.getEntryNode();
5983     ConstantMemory = true;
5984   } else {
5985     // Do not serialize non-volatile loads against each other.
5986     Root = Builder.DAG.getRoot();
5987   }
5988 
5989   SDValue Ptr = Builder.getValue(PtrVal);
5990   SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
5991                                         Ptr, MachinePointerInfo(PtrVal),
5992                                         /* Alignment = */ 1);
5993 
5994   if (!ConstantMemory)
5995     Builder.PendingLoads.push_back(LoadVal.getValue(1));
5996   return LoadVal;
5997 }
5998 
5999 /// Record the value for an instruction that produces an integer result,
6000 /// converting the type where necessary.
6001 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
6002                                                   SDValue Value,
6003                                                   bool IsSigned) {
6004   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6005                                                     I.getType(), true);
6006   if (IsSigned)
6007     Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
6008   else
6009     Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
6010   setValue(&I, Value);
6011 }
6012 
6013 /// See if we can lower a memcmp call into an optimized form. If so, return
6014 /// true and lower it. Otherwise return false, and it will be lowered like a
6015 /// normal call.
6016 /// The caller already checked that \p I calls the appropriate LibFunc with a
6017 /// correct prototype.
6018 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
6019   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
6020   const Value *Size = I.getArgOperand(2);
6021   const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
6022   if (CSize && CSize->getZExtValue() == 0) {
6023     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6024                                                           I.getType(), true);
6025     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
6026     return true;
6027   }
6028 
6029   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6030   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
6031       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
6032       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
6033   if (Res.first.getNode()) {
6034     processIntegerCallValue(I, Res.first, true);
6035     PendingLoads.push_back(Res.second);
6036     return true;
6037   }
6038 
6039   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
6040   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
6041   if (!CSize || !IsOnlyUsedInZeroEqualityComparison(&I))
6042     return false;
6043 
6044   // If the target has a fast compare for the given size, it will return a
6045   // preferred load type for that size. Require that the load VT is legal and
6046   // that the target supports unaligned loads of that type. Otherwise, return
6047   // INVALID.
6048   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
6049     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6050     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
6051     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
6052       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
6053       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
6054       // TODO: Check alignment of src and dest ptrs.
6055       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
6056       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
6057       if (!TLI.isTypeLegal(LVT) ||
6058           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
6059           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
6060         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
6061     }
6062 
6063     return LVT;
6064   };
6065 
6066   // This turns into unaligned loads. We only do this if the target natively
6067   // supports the MVT we'll be loading or if it is small enough (<= 4) that
6068   // we'll only produce a small number of byte loads.
6069   MVT LoadVT;
6070   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
6071   switch (NumBitsToCompare) {
6072   default:
6073     return false;
6074   case 16:
6075     LoadVT = MVT::i16;
6076     break;
6077   case 32:
6078     LoadVT = MVT::i32;
6079     break;
6080   case 64:
6081   case 128:
6082   case 256:
6083     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
6084     break;
6085   }
6086 
6087   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
6088     return false;
6089 
6090   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
6091   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
6092 
6093   // Bitcast to a wide integer type if the loads are vectors.
6094   if (LoadVT.isVector()) {
6095     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
6096     LoadL = DAG.getBitcast(CmpVT, LoadL);
6097     LoadR = DAG.getBitcast(CmpVT, LoadR);
6098   }
6099 
6100   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
6101   processIntegerCallValue(I, Cmp, false);
6102   return true;
6103 }
6104 
6105 /// See if we can lower a memchr call into an optimized form. If so, return
6106 /// true and lower it. Otherwise return false, and it will be lowered like a
6107 /// normal call.
6108 /// The caller already checked that \p I calls the appropriate LibFunc with a
6109 /// correct prototype.
6110 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
6111   const Value *Src = I.getArgOperand(0);
6112   const Value *Char = I.getArgOperand(1);
6113   const Value *Length = I.getArgOperand(2);
6114 
6115   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6116   std::pair<SDValue, SDValue> Res =
6117     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
6118                                 getValue(Src), getValue(Char), getValue(Length),
6119                                 MachinePointerInfo(Src));
6120   if (Res.first.getNode()) {
6121     setValue(&I, Res.first);
6122     PendingLoads.push_back(Res.second);
6123     return true;
6124   }
6125 
6126   return false;
6127 }
6128 
6129 /// See if we can lower a mempcpy call into an optimized form. If so, return
6130 /// true and lower it. Otherwise return false, and it will be lowered like a
6131 /// normal call.
6132 /// The caller already checked that \p I calls the appropriate LibFunc with a
6133 /// correct prototype.
6134 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
6135   SDValue Dst = getValue(I.getArgOperand(0));
6136   SDValue Src = getValue(I.getArgOperand(1));
6137   SDValue Size = getValue(I.getArgOperand(2));
6138 
6139   unsigned DstAlign = DAG.InferPtrAlignment(Dst);
6140   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
6141   unsigned Align = std::min(DstAlign, SrcAlign);
6142   if (Align == 0) // Alignment of one or both could not be inferred.
6143     Align = 1; // 0 and 1 both specify no alignment, but 0 is reserved.
6144 
6145   bool isVol = false;
6146   SDLoc sdl = getCurSDLoc();
6147 
6148   // In the mempcpy context we need to pass in a false value for isTailCall
6149   // because the return pointer needs to be adjusted by the size of
6150   // the copied memory.
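       // That is, mempcpy(dst, src, n) returns dst + n, whereas memcpy
       // returns dst.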
6151   SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Align, isVol,
6152                              false, /*isTailCall=*/false,
6153                              MachinePointerInfo(I.getArgOperand(0)),
6154                              MachinePointerInfo(I.getArgOperand(1)));
6155   assert(MC.getNode() != nullptr &&
6156          "** memcpy should not be lowered as TailCall in mempcpy context **");
6157   DAG.setRoot(MC);
6158 
6159   // Check if Size needs to be truncated or extended.
6160   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
6161 
6162   // Adjust return pointer to point just past the last dst byte.
6163   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
6164                                     Dst, Size);
6165   setValue(&I, DstPlusSize);
6166   return true;
6167 }
6168 
6169 /// See if we can lower a strcpy call into an optimized form.  If so, return
6170 /// true and lower it, otherwise return false and it will be lowered like a
6171 /// normal call.
6172 /// The caller already checked that \p I calls the appropriate LibFunc with a
6173 /// correct prototype.
6174 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
6175   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6176 
6177   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6178   std::pair<SDValue, SDValue> Res =
6179     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
6180                                 getValue(Arg0), getValue(Arg1),
6181                                 MachinePointerInfo(Arg0),
6182                                 MachinePointerInfo(Arg1), isStpcpy);
6183   if (Res.first.getNode()) {
6184     setValue(&I, Res.first);
6185     DAG.setRoot(Res.second);
6186     return true;
6187   }
6188 
6189   return false;
6190 }
6191 
6192 /// See if we can lower a strcmp call into an optimized form.  If so, return
6193 /// true and lower it, otherwise return false and it will be lowered like a
6194 /// normal call.
6195 /// The caller already checked that \p I calls the appropriate LibFunc with a
6196 /// correct prototype.
6197 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
6198   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6199 
6200   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6201   std::pair<SDValue, SDValue> Res =
6202     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
6203                                 getValue(Arg0), getValue(Arg1),
6204                                 MachinePointerInfo(Arg0),
6205                                 MachinePointerInfo(Arg1));
6206   if (Res.first.getNode()) {
6207     processIntegerCallValue(I, Res.first, true);
6208     PendingLoads.push_back(Res.second);
6209     return true;
6210   }
6211 
6212   return false;
6213 }
6214 
6215 /// See if we can lower a strlen call into an optimized form.  If so, return
6216 /// true and lower it, otherwise return false and it will be lowered like a
6217 /// normal call.
6218 /// The caller already checked that \p I calls the appropriate LibFunc with a
6219 /// correct prototype.
6220 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
6221   const Value *Arg0 = I.getArgOperand(0);
6222 
6223   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6224   std::pair<SDValue, SDValue> Res =
6225     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
6226                                 getValue(Arg0), MachinePointerInfo(Arg0));
6227   if (Res.first.getNode()) {
6228     processIntegerCallValue(I, Res.first, false);
6229     PendingLoads.push_back(Res.second);
6230     return true;
6231   }
6232 
6233   return false;
6234 }
6235 
6236 /// See if we can lower a strnlen call into an optimized form.  If so, return
6237 /// true and lower it, otherwise return false and it will be lowered like a
6238 /// normal call.
6239 /// The caller already checked that \p I calls the appropriate LibFunc with a
6240 /// correct prototype.
6241 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
6242   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6243 
6244   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6245   std::pair<SDValue, SDValue> Res =
6246     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
6247                                  getValue(Arg0), getValue(Arg1),
6248                                  MachinePointerInfo(Arg0));
6249   if (Res.first.getNode()) {
6250     processIntegerCallValue(I, Res.first, false);
6251     PendingLoads.push_back(Res.second);
6252     return true;
6253   }
6254 
6255   return false;
6256 }
6257 
6258 /// See if we can lower a unary floating-point operation into an SDNode with
6259 /// the specified Opcode.  If so, return true and lower it, otherwise return
6260 /// false and it will be lowered like a normal call.
6261 /// The caller already checked that \p I calls the appropriate LibFunc with a
6262 /// correct prototype.
6263 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
6264                                               unsigned Opcode) {
6265   // We already checked this call's prototype; verify it doesn't modify errno.
6266   if (!I.onlyReadsMemory())
6267     return false;
6268 
6269   SDValue Tmp = getValue(I.getArgOperand(0));
6270   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
6271   return true;
6272 }
6273 
6274 /// See if we can lower a binary floating-point operation into an SDNode with
6275 /// the specified Opcode. If so, return true and lower it. Otherwise return
6276 /// false, and it will be lowered like a normal call.
6277 /// The caller already checked that \p I calls the appropriate LibFunc with a
6278 /// correct prototype.
6279 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
6280                                                unsigned Opcode) {
6281   // We already checked this call's prototype; verify it doesn't modify errno.
6282   if (!I.onlyReadsMemory())
6283     return false;
6284 
6285   SDValue Tmp0 = getValue(I.getArgOperand(0));
6286   SDValue Tmp1 = getValue(I.getArgOperand(1));
6287   EVT VT = Tmp0.getValueType();
6288   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
6289   return true;
6290 }
6291 
6292 void SelectionDAGBuilder::visitCall(const CallInst &I) {
6293   // Handle inline assembly differently.
6294   if (isa<InlineAsm>(I.getCalledValue())) {
6295     visitInlineAsm(&I);
6296     return;
6297   }
6298 
6299   MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6300   computeUsesVAFloatArgument(I, MMI);
6301 
6302   const char *RenameFn = nullptr;
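       // A non-null return from visitIntrinsicCall names a runtime function
       // the intrinsic should be lowered to as an ordinary call.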
6303   if (Function *F = I.getCalledFunction()) {
6304     if (F->isDeclaration()) {
6305       if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
6306         if (unsigned IID = II->getIntrinsicID(F)) {
6307           RenameFn = visitIntrinsicCall(I, IID);
6308           if (!RenameFn)
6309             return;
6310         }
6311       }
6312       if (Intrinsic::ID IID = F->getIntrinsicID()) {
6313         RenameFn = visitIntrinsicCall(I, IID);
6314         if (!RenameFn)
6315           return;
6316       }
6317     }
6318 
6319     // Check for well-known libc/libm calls.  If the function is internal, it
6320     // can't be a library call.  Don't do the check if marked as nobuiltin for
6321     // some reason.
6322     LibFunc Func;
6323     if (!I.isNoBuiltin() && !F->hasLocalLinkage() && F->hasName() &&
6324         LibInfo->getLibFunc(*F, Func) &&
6325         LibInfo->hasOptimizedCodeGen(Func)) {
6326       switch (Func) {
6327       default: break;
6328       case LibFunc_copysign:
6329       case LibFunc_copysignf:
6330       case LibFunc_copysignl:
6331         // We already checked this call's prototype; verify it doesn't modify
6332         // errno.
6333         if (I.onlyReadsMemory()) {
6334           SDValue LHS = getValue(I.getArgOperand(0));
6335           SDValue RHS = getValue(I.getArgOperand(1));
6336           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
6337                                    LHS.getValueType(), LHS, RHS));
6338           return;
6339         }
6340         break;
6341       case LibFunc_fabs:
6342       case LibFunc_fabsf:
6343       case LibFunc_fabsl:
6344         if (visitUnaryFloatCall(I, ISD::FABS))
6345           return;
6346         break;
6347       case LibFunc_fmin:
6348       case LibFunc_fminf:
6349       case LibFunc_fminl:
6350         if (visitBinaryFloatCall(I, ISD::FMINNUM))
6351           return;
6352         break;
6353       case LibFunc_fmax:
6354       case LibFunc_fmaxf:
6355       case LibFunc_fmaxl:
6356         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
6357           return;
6358         break;
6359       case LibFunc_sin:
6360       case LibFunc_sinf:
6361       case LibFunc_sinl:
6362         if (visitUnaryFloatCall(I, ISD::FSIN))
6363           return;
6364         break;
6365       case LibFunc_cos:
6366       case LibFunc_cosf:
6367       case LibFunc_cosl:
6368         if (visitUnaryFloatCall(I, ISD::FCOS))
6369           return;
6370         break;
6371       case LibFunc_sqrt:
6372       case LibFunc_sqrtf:
6373       case LibFunc_sqrtl:
6374       case LibFunc_sqrt_finite:
6375       case LibFunc_sqrtf_finite:
6376       case LibFunc_sqrtl_finite:
6377         if (visitUnaryFloatCall(I, ISD::FSQRT))
6378           return;
6379         break;
6380       case LibFunc_floor:
6381       case LibFunc_floorf:
6382       case LibFunc_floorl:
6383         if (visitUnaryFloatCall(I, ISD::FFLOOR))
6384           return;
6385         break;
6386       case LibFunc_nearbyint:
6387       case LibFunc_nearbyintf:
6388       case LibFunc_nearbyintl:
6389         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
6390           return;
6391         break;
6392       case LibFunc_ceil:
6393       case LibFunc_ceilf:
6394       case LibFunc_ceill:
6395         if (visitUnaryFloatCall(I, ISD::FCEIL))
6396           return;
6397         break;
6398       case LibFunc_rint:
6399       case LibFunc_rintf:
6400       case LibFunc_rintl:
6401         if (visitUnaryFloatCall(I, ISD::FRINT))
6402           return;
6403         break;
6404       case LibFunc_round:
6405       case LibFunc_roundf:
6406       case LibFunc_roundl:
6407         if (visitUnaryFloatCall(I, ISD::FROUND))
6408           return;
6409         break;
6410       case LibFunc_trunc:
6411       case LibFunc_truncf:
6412       case LibFunc_truncl:
6413         if (visitUnaryFloatCall(I, ISD::FTRUNC))
6414           return;
6415         break;
6416       case LibFunc_log2:
6417       case LibFunc_log2f:
6418       case LibFunc_log2l:
6419         if (visitUnaryFloatCall(I, ISD::FLOG2))
6420           return;
6421         break;
6422       case LibFunc_exp2:
6423       case LibFunc_exp2f:
6424       case LibFunc_exp2l:
6425         if (visitUnaryFloatCall(I, ISD::FEXP2))
6426           return;
6427         break;
6428       case LibFunc_memcmp:
6429         if (visitMemCmpCall(I))
6430           return;
6431         break;
6432       case LibFunc_mempcpy:
6433         if (visitMemPCpyCall(I))
6434           return;
6435         break;
6436       case LibFunc_memchr:
6437         if (visitMemChrCall(I))
6438           return;
6439         break;
6440       case LibFunc_strcpy:
6441         if (visitStrCpyCall(I, false))
6442           return;
6443         break;
6444       case LibFunc_stpcpy:
6445         if (visitStrCpyCall(I, true))
6446           return;
6447         break;
6448       case LibFunc_strcmp:
6449         if (visitStrCmpCall(I))
6450           return;
6451         break;
6452       case LibFunc_strlen:
6453         if (visitStrLenCall(I))
6454           return;
6455         break;
6456       case LibFunc_strnlen:
6457         if (visitStrNLenCall(I))
6458           return;
6459         break;
6460       }
6461     }
6462   }
6463 
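  // None of the libcall fast paths above applied (each case falls through via
  // 'break' when its visit* helper declines), so lower this as an ordinary
  // call.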
6464   SDValue Callee;
6465   if (!RenameFn)
6466     Callee = getValue(I.getCalledValue());
6467   else
6468     Callee = DAG.getExternalSymbol(
6469         RenameFn,
6470         DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
6471 
6472   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
6473   // have to do anything here to lower funclet bundles.
6474   assert(!I.hasOperandBundlesOtherThan(
6475              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
6476          "Cannot lower calls with arbitrary operand bundles!");
6477 
6478   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
6479     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
6480   else
6481     // Check if we can potentially perform a tail call. More detailed checking
6482     // is done within LowerCallTo, after more information about the call is
6483     // known.
6484     LowerCallTo(&I, Callee, I.isTailCall());
6485 }
6486 
6487 namespace {
6488 
6489 /// AsmOperandInfo - This contains information for each constraint that we are
6490 /// lowering.
6491 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
6492 public:
6493   /// CallOperand - If this is the result output operand or a clobber
6494   /// this is null, otherwise it is the incoming operand to the CallInst.
6495   /// This gets modified as the asm is processed.
6496   SDValue CallOperand;
6497 
6498   /// AssignedRegs - If this is a register or register class operand, this
6499   /// contains the set of registers corresponding to the operand.
6500   RegsForValue AssignedRegs;
6501 
6502   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
6503     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr,0) {
6504   }
6505 
6506   /// Whether or not this operand accesses memory.
6507   bool hasMemory(const TargetLowering &TLI) const {
6508     // Indirect operands access memory.
6509     if (isIndirect)
6510       return true;
6511 
6512     for (const auto &Code : Codes)
6513       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
6514         return true;
6515 
6516     return false;
6517   }
6518 
6519   /// getCallOperandValEVT - Return the EVT of the Value* that this operand
6520   /// corresponds to.  If there is no Value* for this operand, it returns
6521   /// MVT::Other.
6522   EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
6523                            const DataLayout &DL) const {
6524     if (!CallOperandVal) return MVT::Other;
6525 
6526     if (isa<BasicBlock>(CallOperandVal))
6527       return TLI.getPointerTy(DL);
6528 
6529     llvm::Type *OpTy = CallOperandVal->getType();
6530 
6531     // FIXME: code duplicated from TargetLowering::ParseConstraints().
6532     // If this is an indirect operand, the operand is a pointer to the
6533     // accessed type.
6534     if (isIndirect) {
6535       llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
6536       if (!PtrTy)
6537         report_fatal_error("Indirect operand for inline asm not a pointer!");
6538       OpTy = PtrTy->getElementType();
6539     }
6540 
6541     // Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
6542     if (StructType *STy = dyn_cast<StructType>(OpTy))
6543       if (STy->getNumElements() == 1)
6544         OpTy = STy->getElementType(0);
6545 
6546     // If OpTy is not a single value, it may be a struct/union that we
6547     // can tile with integers.
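    // For example, a { i16, i16 } operand occupies 32 bits and is handled
    // below as an i32.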
6548     if (!OpTy->isSingleValueType() && OpTy->isSized()) {
6549       unsigned BitSize = DL.getTypeSizeInBits(OpTy);
6550       switch (BitSize) {
6551       default: break;
6552       case 1:
6553       case 8:
6554       case 16:
6555       case 32:
6556       case 64:
6557       case 128:
6558         OpTy = IntegerType::get(Context, BitSize);
6559         break;
6560       }
6561     }
6562 
6563     return TLI.getValueType(DL, OpTy, true);
6564   }
6565 };
6566 
6567 typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
6568 
6569 } // end anonymous namespace
6570 
6571 /// Make sure that the output operand \p OpInfo and its corresponding input
6572 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
6573 /// out).
6574 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
6575                                SDISelAsmOperandInfo &MatchingOpInfo,
6576                                SelectionDAG &DAG) {
6577   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
6578     return;
6579 
6580   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
6581   const auto &TLI = DAG.getTargetLoweringInfo();
6582 
6583   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
6584       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
6585                                        OpInfo.ConstraintVT);
6586   std::pair<unsigned, const TargetRegisterClass *> InputRC =
6587       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
6588                                        MatchingOpInfo.ConstraintVT);
6589   if ((OpInfo.ConstraintVT.isInteger() !=
6590        MatchingOpInfo.ConstraintVT.isInteger()) ||
6591       (MatchRC.second != InputRC.second)) {
6592     // FIXME: error out in a more elegant fashion
6593     report_fatal_error("Unsupported asm: input constraint"
6594                        " with a matching output constraint of"
6595                        " incompatible type!");
6596   }
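  // The constraints are compatible; force the input operand to use the
  // output's VT so both ends of the tie get matching register assignments.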
6597   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
6598 }
6599 
6600 /// Get a direct memory input to behave well as an indirect operand.
6601 /// This may introduce stores, hence the need for a \p Chain.
6602 /// \return The (possibly updated) chain.
6603 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
6604                                         SDISelAsmOperandInfo &OpInfo,
6605                                         SelectionDAG &DAG) {
6606   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6607 
6608   // If we don't have an indirect input, put it in the constpool if we can,
6609   // otherwise spill it to a stack slot.
6610   // TODO: This isn't quite right. We need to handle these according to
6611   // the addressing mode that the constraint wants. Also, this may take
6612   // an additional register for the computation and we don't want that
6613   // either.
6614 
6615   // If the operand is a float, integer, or vector constant, spill to a
6616   // constant pool entry to get its address.
6617   const Value *OpVal = OpInfo.CallOperandVal;
6618   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
6619       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
6620     OpInfo.CallOperand = DAG.getConstantPool(
6621         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
6622     return Chain;
6623   }
6624 
6625   // Otherwise, create a stack slot and emit a store to it before the asm.
6626   Type *Ty = OpVal->getType();
6627   auto &DL = DAG.getDataLayout();
6628   uint64_t TySize = DL.getTypeAllocSize(Ty);
6629   unsigned Align = DL.getPrefTypeAlignment(Ty);
6630   MachineFunction &MF = DAG.getMachineFunction();
6631   int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
6632   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
6633   Chain = DAG.getStore(Chain, Location, OpInfo.CallOperand, StackSlot,
6634                        MachinePointerInfo::getFixedStack(MF, SSFI));
6635   OpInfo.CallOperand = StackSlot;
6636 
6637   return Chain;
6638 }
6639 
6640 /// GetRegistersForValue - Assign registers (virtual or physical) for the
6641 /// specified operand.  We prefer to assign virtual registers, to allow the
6642 /// register allocator to handle the assignment process.  However, if the asm
6643 /// uses features that we can't model on machineinstrs, we have SDISel do the
6644 /// allocation.  This produces generally horrible, but correct, code.
6645 ///
6646 ///   OpInfo describes the operand.
6647 ///
6648 static void GetRegistersForValue(SelectionDAG &DAG, const TargetLowering &TLI,
6649                                  const SDLoc &DL,
6650                                  SDISelAsmOperandInfo &OpInfo) {
6651   LLVMContext &Context = *DAG.getContext();
6652 
6653   MachineFunction &MF = DAG.getMachineFunction();
6654   SmallVector<unsigned, 4> Regs;
6655   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
6656 
6657   // If this is a constraint for a single physreg, or a constraint for a
6658   // register class, find it.
6659   std::pair<unsigned, const TargetRegisterClass *> PhysReg =
6660       TLI.getRegForInlineAsmConstraint(&TRI, OpInfo.ConstraintCode,
6661                                        OpInfo.ConstraintVT);
6662 
6663   unsigned NumRegs = 1;
6664   if (OpInfo.ConstraintVT != MVT::Other) {
6665     // If this is an FP input in an integer register (or vice versa), insert a bit
6666     // cast of the input value.  More generally, handle any case where the input
6667     // value disagrees with the register class we plan to stick this in.
6668     if (OpInfo.Type == InlineAsm::isInput && PhysReg.second &&
6669         !TRI.isTypeLegalForClass(*PhysReg.second, OpInfo.ConstraintVT)) {
6670       // Try to convert to the first EVT that the reg class contains.  If the
6671       // types are identical size, use a bitcast to convert (e.g. two differing
6672       // vector types).
6673       MVT RegVT = *TRI.legalclasstypes_begin(*PhysReg.second);
6674       if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) {
6675         OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6676                                          RegVT, OpInfo.CallOperand);
6677         OpInfo.ConstraintVT = RegVT;
6678       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
6679         // If the input is a FP value and we want it in FP registers, do a
6680         // bitcast to the corresponding integer type.  This turns an f64 value
6681         // into i64, which can be passed with two i32 values on a 32-bit
6682         // machine.
6683         RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
6684         OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6685                                          RegVT, OpInfo.CallOperand);
6686         OpInfo.ConstraintVT = RegVT;
6687       }
6688     }
6689 
6690     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
6691   }
6692 
6693   MVT RegVT;
6694   EVT ValueVT = OpInfo.ConstraintVT;
6695 
6696   // If this is a constraint for a specific physical register, like {r17},
6697   // assign it now.
6698   if (unsigned AssignedReg = PhysReg.first) {
6699     const TargetRegisterClass *RC = PhysReg.second;
6700     if (OpInfo.ConstraintVT == MVT::Other)
6701       ValueVT = *TRI.legalclasstypes_begin(*RC);
6702 
6703     // Get the actual register value type.  This is important, because the user
6704     // may have asked for (e.g.) the AX register in i32 type.  We need to
6705     // remember that AX is actually i16 to get the right extension.
6706     RegVT = *TRI.legalclasstypes_begin(*RC);
6707 
6708     // This is an explicit reference to a physical register.
6709     Regs.push_back(AssignedReg);
6710 
6711     // If this is an expanded reference, add the rest of the regs to Regs.
6712     if (NumRegs != 1) {
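      // Walk the register class's order to locate AssignedReg; the remaining
      // NumRegs-1 registers are taken from the positions that follow it. This
      // relies on AssignedReg actually being a member of RC.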
6713       TargetRegisterClass::iterator I = RC->begin();
6714       for (; *I != AssignedReg; ++I)
6715         assert(I != RC->end() && "Didn't find reg!");
6716 
6717       // Already added the first reg.
6718       --NumRegs; ++I;
6719       for (; NumRegs; --NumRegs, ++I) {
6720         assert(I != RC->end() && "Ran out of registers to allocate!");
6721         Regs.push_back(*I);
6722       }
6723     }
6724 
6725     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
6726     return;
6727   }
6728 
6729   // Otherwise, if this was a reference to an LLVM register class, create vregs
6730   // for this reference.
6731   if (const TargetRegisterClass *RC = PhysReg.second) {
6732     RegVT = *TRI.legalclasstypes_begin(*RC);
6733     if (OpInfo.ConstraintVT == MVT::Other)
6734       ValueVT = RegVT;
6735 
6736     // Create the appropriate number of virtual registers.
6737     MachineRegisterInfo &RegInfo = MF.getRegInfo();
6738     for (; NumRegs; --NumRegs)
6739       Regs.push_back(RegInfo.createVirtualRegister(RC));
6740 
6741     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
6742     return;
6743   }
6744 
6745   // Otherwise, we couldn't allocate enough registers for this.
6746 }
6747 
6748 static unsigned
6749 findMatchingInlineAsmOperand(unsigned OperandNo,
6750                              const std::vector<SDValue> &AsmNodeOperands) {
6751   // Scan until we find the definition we already emitted of this operand.
6752   unsigned CurOp = InlineAsm::Op_FirstOperand;
6753   for (; OperandNo; --OperandNo) {
6754     // Advance to the next operand.
6755     unsigned OpFlag =
6756         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6757     assert((InlineAsm::isRegDefKind(OpFlag) ||
6758             InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
6759             InlineAsm::isMemKind(OpFlag)) &&
6760            "Skipped past definitions?");
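    // Each operand group is one flag-word constant followed by
    // getNumOperandRegisters(OpFlag) register operands; skip the whole group.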
6761     CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
6762   }
6763   return CurOp;
6764 }
6765 
6766 /// Fill \p Regs with \p NumRegs new virtual registers of type \p RegVT
6767 /// \return true if it has succeeded, false otherwise
6768 static bool createVirtualRegs(SmallVector<unsigned, 4> &Regs, unsigned NumRegs,
6769                               MVT RegVT, SelectionDAG &DAG) {
6770   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6771   MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
6772   for (unsigned i = 0, e = NumRegs; i != e; ++i) {
6773     if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT))
6774       Regs.push_back(RegInfo.createVirtualRegister(RC));
6775     else
6776       return false;
6777   }
6778   return true;
6779 }
6780 
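// Accumulates the InlineAsm::Extra_* bits (side effects, alignstack, asm
// dialect, may-load/may-store) that are later encoded as a single immediate
// operand on the INLINEASM node.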
6781 class ExtraFlags {
6782   unsigned Flags = 0;
6783 
6784 public:
6785   explicit ExtraFlags(ImmutableCallSite CS) {
6786     const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
6787     if (IA->hasSideEffects())
6788       Flags |= InlineAsm::Extra_HasSideEffects;
6789     if (IA->isAlignStack())
6790       Flags |= InlineAsm::Extra_IsAlignStack;
6791     if (CS.isConvergent())
6792       Flags |= InlineAsm::Extra_IsConvergent;
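    // getDialect() is 0 for AT&T syntax and 1 for Intel, so this multiply
    // either leaves the dialect bit clear or sets it.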
6793     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
6794   }
6795 
6796   void update(const llvm::TargetLowering::AsmOperandInfo &OpInfo) {
6797     // Ideally, we would only check against memory constraints.  However, the
6798     // meaning of an Other constraint can be target-specific and we can't easily
6799     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
6800     // for Other constraints as well.
6801     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
6802         OpInfo.ConstraintType == TargetLowering::C_Other) {
6803       if (OpInfo.Type == InlineAsm::isInput)
6804         Flags |= InlineAsm::Extra_MayLoad;
6805       else if (OpInfo.Type == InlineAsm::isOutput)
6806         Flags |= InlineAsm::Extra_MayStore;
6807       else if (OpInfo.Type == InlineAsm::isClobber)
6808         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
6809     }
6810   }
6811 
6812   unsigned get() const { return Flags; }
6813 };
6814 
6815 /// visitInlineAsm - Handle a call to an InlineAsm object.
6816 ///
6817 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
6818   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
6819 
6820   /// ConstraintOperands - Information about all of the constraints.
6821   SDISelAsmOperandInfoVector ConstraintOperands;
6822 
6823   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6824   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
6825       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);
6826 
6827   bool hasMemory = false;
6828 
6829   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore flags.
6830   ExtraFlags ExtraInfo(CS);
6831 
6832   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
6833   unsigned ResNo = 0;   // ResNo - The result number of the next output.
6834   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
6835     ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
6836     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
6837 
6838     MVT OpVT = MVT::Other;
6839 
6840     // Compute the value type for each operand.
6841     if (OpInfo.Type == InlineAsm::isInput ||
6842         (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
6843       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
6844 
6845       // Process the call argument. BasicBlocks are labels, currently appearing
6846       // only in inline asm.
6847       if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
6848         OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
6849       } else {
6850         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
6851       }
6852 
6853       OpVT =
6854           OpInfo
6855               .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
6856               .getSimpleVT();
6857     }
6858 
6859     if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
6860       // The return value of the call is this value.  As such, there is no
6861       // corresponding argument.
6862       assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
6863       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
6864         OpVT = TLI.getSimpleValueType(DAG.getDataLayout(),
6865                                       STy->getElementType(ResNo));
6866       } else {
6867         assert(ResNo == 0 && "Asm only has one result!");
6868         OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
6869       }
6870       ++ResNo;
6871     }
6872 
6873     OpInfo.ConstraintVT = OpVT;
6874 
6875     if (!hasMemory)
6876       hasMemory = OpInfo.hasMemory(TLI);
6877 
6878     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
6879     // FIXME: Could we compute this on OpInfo rather than TargetConstraints[i]?
6880     auto TargetConstraint = TargetConstraints[i];
6881 
6882     // Compute the constraint code and ConstraintType to use.
6883     TLI.ComputeConstraintToUse(TargetConstraint, SDValue());
6884 
6885     ExtraInfo.update(TargetConstraint);
6886   }
6887 
6888   SDValue Chain, Flag;
6889 
6890   // We won't need to flush pending loads if this asm doesn't touch
6891   // memory and is nonvolatile.
6892   if (hasMemory || IA->hasSideEffects())
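  // (getRoot() flushes all pending loads into the root via a TokenFactor;
  // DAG.getRoot() reuses the current root untouched.)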
6893     Chain = getRoot();
6894   else
6895     Chain = DAG.getRoot();
6896 
6897   // Second pass over the constraints: compute which constraint option to use
6898   // and assign registers to constraints that want a specific physreg.
6899   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6900     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6901 
6902     // If this is an output operand with a matching input operand, look up the
6903     // matching input. If their types mismatch, e.g. one is an integer, the
6904     // other is floating point, or their sizes are different, flag it as an
6905     // error.
6906     if (OpInfo.hasMatchingInput()) {
6907       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
6908       patchMatchingInput(OpInfo, Input, DAG);
6909     }
6910 
6911     // Compute the constraint code and ConstraintType to use.
6912     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
6913 
6914     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6915         OpInfo.Type == InlineAsm::isClobber)
6916       continue;
6917 
6918     // If this is a memory input, and if the operand is not indirect, do what we
6919     // need to provide an address for the memory input.
6920     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6921         !OpInfo.isIndirect) {
6922       assert((OpInfo.isMultipleAlternative ||
6923               (OpInfo.Type == InlineAsm::isInput)) &&
6924              "Can only indirectify direct input operands!");
6925 
6926       // Memory operands really want the address of the value.
6927       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
6928 
6929       // There is no longer a Value* corresponding to this operand.
6930       OpInfo.CallOperandVal = nullptr;
6931 
6932       // It is now an indirect operand.
6933       OpInfo.isIndirect = true;
6934     }
6935 
6936     // If this constraint is for a specific register, allocate it before
6937     // anything else.
6938     if (OpInfo.ConstraintType == TargetLowering::C_Register)
6939       GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
6940   }
6941 
6942   // Third pass - Loop over all of the operands, assigning virtual or physregs
6943   // to register class operands.
6944   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6945     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6946 
6947     // C_Register operands have already been allocated, Other/Memory don't need
6948     // to be.
6949     if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
6950       GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
6951   }
6952 
6953   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
6954   std::vector<SDValue> AsmNodeOperands;
6955   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
6956   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
6957       IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));
6958 
6959   // If we have a !srcloc metadata node associated with it, we want to attach
6960   // this to the ultimately generated inline asm machineinstr.  To do this, we
6961   // pass in the third operand as this (potentially null) inline asm MDNode.
6962   const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
6963   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
6964 
6965   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
6966   // bits as operand 3.
6967   AsmNodeOperands.push_back(DAG.getTargetConstant(
6968       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
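  // At this point the fixed INLINEASM operand layout is complete: operand 0
  // is the input chain (patched in at the end), 1 the asm string, 2 the
  // !srcloc MDNode, and 3 the extra-info immediate; per-operand flag/value
  // groups follow.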
6969 
6970   // Loop over all of the inputs, copying the operand values into the
6971   // appropriate registers and processing the output regs.
6972   RegsForValue RetValRegs;
6973 
6974   // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
6975   std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
6976 
6977   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6978     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6979 
6980     switch (OpInfo.Type) {
6981     case InlineAsm::isOutput: {
6982       if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
6983           OpInfo.ConstraintType != TargetLowering::C_Register) {
6984         // Memory output, or 'other' output (e.g. 'X' constraint).
6985         assert(OpInfo.isIndirect && "Memory output must be indirect operand");
6986 
6987         unsigned ConstraintID =
6988             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
6989         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
6990                "Failed to convert memory constraint code to constraint id.");
6991 
6992         // Add information to the INLINEASM node to know about this output.
6993         unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
6994         OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
6995         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
6996                                                         MVT::i32));
6997         AsmNodeOperands.push_back(OpInfo.CallOperand);
6998         break;
6999       }
7000 
7001       // Otherwise, this is a register or register class output.
7002 
7003       // Copy the output from the appropriate register.  Find a register that
7004       // we can use.
7005       if (OpInfo.AssignedRegs.Regs.empty()) {
7006         emitInlineAsmError(
7007             CS, "couldn't allocate output register for constraint '" +
7008                     Twine(OpInfo.ConstraintCode) + "'");
7009         return;
7010       }
7011 
7012       // If this is an indirect operand, store through the pointer after the
7013       // asm.
7014       if (OpInfo.isIndirect) {
7015         IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
7016                                                       OpInfo.CallOperandVal));
7017       } else {
7018         // This is the result value of the call.
7019         assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7020         // Concatenate this output onto the outputs list.
7021         RetValRegs.append(OpInfo.AssignedRegs);
7022       }
7023 
7024       // Add information to the INLINEASM node to know that this register is
7025       // set.
7026       OpInfo.AssignedRegs
7027           .AddInlineAsmOperands(OpInfo.isEarlyClobber
7028                                     ? InlineAsm::Kind_RegDefEarlyClobber
7029                                     : InlineAsm::Kind_RegDef,
7030                                 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
7031       break;
7032     }
7033     case InlineAsm::isInput: {
7034       SDValue InOperandVal = OpInfo.CallOperand;
7035 
7036       if (OpInfo.isMatchingInputConstraint()) {
7037         // If this is required to match an output register we have already set,
7038         // just use its register.
7039         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
7040                                                   AsmNodeOperands);
7041         unsigned OpFlag =
7042           cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7043         if (InlineAsm::isRegDefKind(OpFlag) ||
7044             InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
7045           // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
7046           if (OpInfo.isIndirect) {
7047             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
7048             emitInlineAsmError(CS, "inline asm not supported yet:"
7049                                    " don't know how to handle tied "
7050                                    "indirect register inputs");
7051             return;
7052           }
7053 
7054           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
7055           SmallVector<unsigned, 4> Regs;
7056 
7057           if (!createVirtualRegs(Regs,
7058                                  InlineAsm::getNumOperandRegisters(OpFlag),
7059                                  RegVT, DAG)) {
7060             emitInlineAsmError(CS, "inline asm error: This value type register "
7061                                    "class is not natively supported!");
7062             return;
7063           }
7064 
7065           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
7066 
7067           SDLoc dl = getCurSDLoc();
7068           // Use the produced MatchedRegs object to copy the input into the registers.
7069           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl,
7070                                     Chain, &Flag, CS.getInstruction());
7071           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
7072                                            true, OpInfo.getMatchedOperand(), dl,
7073                                            DAG, AsmNodeOperands);
7074           break;
7075         }
7076 
7077         assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
7078         assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
7079                "Unexpected number of operands");
7080         // Add information to the INLINEASM node to know about this input.
7081         // See InlineAsm.h isUseOperandTiedToDef.
7082         OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
7083         OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
7084                                                     OpInfo.getMatchedOperand());
7085         AsmNodeOperands.push_back(DAG.getTargetConstant(
7086             OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7087         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
7088         break;
7089       }
7090 
7091       // Treat indirect 'X' constraint as memory.
7092       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
7093           OpInfo.isIndirect)
7094         OpInfo.ConstraintType = TargetLowering::C_Memory;
7095 
7096       if (OpInfo.ConstraintType == TargetLowering::C_Other) {
7097         std::vector<SDValue> Ops;
7098         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
7099                                           Ops, DAG);
7100         if (Ops.empty()) {
7101           emitInlineAsmError(CS, "invalid operand for inline asm constraint '" +
7102                                      Twine(OpInfo.ConstraintCode) + "'");
7103           return;
7104         }
7105 
7106         // Add information to the INLINEASM node to know about this input.
7107         unsigned ResOpType =
7108           InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
7109         AsmNodeOperands.push_back(DAG.getTargetConstant(
7110             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7111         AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
7112         break;
7113       }
7114 
7115       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
7116         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
7117         assert(InOperandVal.getValueType() ==
7118                    TLI.getPointerTy(DAG.getDataLayout()) &&
7119                "Memory operands expect pointer values");
7120 
7121         unsigned ConstraintID =
7122             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
7123         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
7124                "Failed to convert memory constraint code to constraint id.");
7125 
7126         // Add information to the INLINEASM node to know about this input.
7127         unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
7128         ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
7129         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
7130                                                         getCurSDLoc(),
7131                                                         MVT::i32));
7132         AsmNodeOperands.push_back(InOperandVal);
7133         break;
7134       }
7135 
7136       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
7137               OpInfo.ConstraintType == TargetLowering::C_Register) &&
7138              "Unknown constraint type!");
7139 
7140       // TODO: Support this.
7141       if (OpInfo.isIndirect) {
7142         emitInlineAsmError(
7143             CS, "Don't know how to handle indirect register inputs yet "
7144                 "for constraint '" +
7145                     Twine(OpInfo.ConstraintCode) + "'");
7146         return;
7147       }
7148 
7149       // Copy the input into the appropriate registers.
7150       if (OpInfo.AssignedRegs.Regs.empty()) {
7151         emitInlineAsmError(CS, "couldn't allocate input reg for constraint '" +
7152                                    Twine(OpInfo.ConstraintCode) + "'");
7153         return;
7154       }
7155 
7156       SDLoc dl = getCurSDLoc();
7157 
7158       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl,
7159                                         Chain, &Flag, CS.getInstruction());
7160 
7161       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
7162                                                dl, DAG, AsmNodeOperands);
7163       break;
7164     }
7165     case InlineAsm::isClobber: {
7166       // Add the clobbered value to the operand list, so that the register
7167       // allocator is aware that the physreg got clobbered.
7168       if (!OpInfo.AssignedRegs.Regs.empty())
7169         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
7170                                                  false, 0, getCurSDLoc(), DAG,
7171                                                  AsmNodeOperands);
7172       break;
7173     }
7174     }
7175   }
7176 
7177   // Finish up input operands.  Set the input chain and add the flag last.
7178   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
7179   if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
7180 
7181   Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
7182                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
7183   Flag = Chain.getValue(1);
7184 
7185   // If this asm returns a register value, copy the result from that register
7186   // and set it as the value of the call.
7187   if (!RetValRegs.Regs.empty()) {
7188     SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
7189                                              Chain, &Flag, CS.getInstruction());
7190 
7191     // FIXME: Why don't we do this for inline asms with MRVs?
7192     if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
7193       EVT ResultType = TLI.getValueType(DAG.getDataLayout(), CS.getType());
7194 
7195       // If any of the results of the inline asm is a vector, it may have the
7196       // wrong width/num elts.  This can happen for register classes that can
7197       // contain multiple different value types.  The preg or vreg allocated may
7198       // not have the same VT as was expected.  Convert it to the right type
7199       // with bit_convert.
7200       if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
7201         Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(),
7202                           ResultType, Val);
7203 
7204       } else if (ResultType != Val.getValueType() &&
7205                  ResultType.isInteger() && Val.getValueType().isInteger()) {
7206         // If a result value was tied to an input value, the computed result may
7207         // have a wider width than the expected result.  Extract the relevant
7208         // portion.
7209         Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val);
7210       }
7211 
7212       assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
7213     }
7214 
7215     setValue(CS.getInstruction(), Val);
7216     // Don't need to use this as a chain in this case.
7217     if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
7218       return;
7219   }
7220 
7221   std::vector<std::pair<SDValue, const Value *> > StoresToEmit;
7222 
7223   // Process indirect outputs, first output all of the flagged copies out of
7224   // physregs.
7225   for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
7226     RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
7227     const Value *Ptr = IndirectStoresToEmit[i].second;
7228     SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
7229                                              Chain, &Flag, IA);
7230     StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
7231   }
7232 
7233   // Emit the non-flagged stores from the physregs.
7234   SmallVector<SDValue, 8> OutChains;
7235   for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
7236     SDValue Val = DAG.getStore(Chain, getCurSDLoc(), StoresToEmit[i].first,
7237                                getValue(StoresToEmit[i].second),
7238                                MachinePointerInfo(StoresToEmit[i].second));
7239     OutChains.push_back(Val);
7240   }
7241 
7242   if (!OutChains.empty())
7243     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
7244 
7245   DAG.setRoot(Chain);
7246 }
7247 
7248 void SelectionDAGBuilder::emitInlineAsmError(ImmutableCallSite CS,
7249                                              const Twine &Message) {
7250   LLVMContext &Ctx = *DAG.getContext();
7251   Ctx.emitError(CS.getInstruction(), Message);
7252 
7253   // Make sure we leave the DAG in a valid state
7254   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7255   auto VT = TLI.getValueType(DAG.getDataLayout(), CS.getType());
7256   setValue(CS.getInstruction(), DAG.getUNDEF(VT));
7257 }
7258 
7259 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
7260   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
7261                           MVT::Other, getRoot(),
7262                           getValue(I.getArgOperand(0)),
7263                           DAG.getSrcValue(I.getArgOperand(0))));
7264 }
7265 
7266 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
7267   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7268   const DataLayout &DL = DAG.getDataLayout();
7269   SDValue V = DAG.getVAArg(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7270                            getCurSDLoc(), getRoot(), getValue(I.getOperand(0)),
7271                            DAG.getSrcValue(I.getOperand(0)),
7272                            DL.getABITypeAlignment(I.getType()));
7273   setValue(&I, V);
7274   DAG.setRoot(V.getValue(1));
7275 }
7276 
7277 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
7278   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
7279                           MVT::Other, getRoot(),
7280                           getValue(I.getArgOperand(0)),
7281                           DAG.getSrcValue(I.getArgOperand(0))));
7282 }
7283 
7284 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
7285   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
7286                           MVT::Other, getRoot(),
7287                           getValue(I.getArgOperand(0)),
7288                           getValue(I.getArgOperand(1)),
7289                           DAG.getSrcValue(I.getArgOperand(0)),
7290                           DAG.getSrcValue(I.getArgOperand(1))));
7291 }
7292 
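/// If \p I carries !range metadata proving its value fits in fewer bits,
/// wrap \p Op in an ISD::AssertZext of the narrower type so later combines
/// can exploit the known zero bits. For example, !range !{i64 0, i64 256}
/// yields an AssertZext to i8.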
7293 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
7294                                                     const Instruction &I,
7295                                                     SDValue Op) {
7296   const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
7297   if (!Range)
7298     return Op;
7299 
7300   ConstantRange CR = getConstantRangeFromMetadata(*Range);
7301   if (CR.isFullSet() || CR.isEmptySet() || CR.isWrappedSet())
7302     return Op;
7303 
7304   APInt Lo = CR.getUnsignedMin();
7305   if (!Lo.isMinValue())
7306     return Op;
7307 
7308   APInt Hi = CR.getUnsignedMax();
7309   unsigned Bits = Hi.getActiveBits();
7310 
7311   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
7312 
7313   SDLoc SL = getCurSDLoc();
7314 
7315   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
7316                              DAG.getValueType(SmallVT));
7317   unsigned NumVals = Op.getNode()->getNumValues();
7318   if (NumVals == 1)
7319     return ZExt;
7320 
7321   SmallVector<SDValue, 4> Ops;
7322 
7323   Ops.push_back(ZExt);
7324   for (unsigned I = 1; I != NumVals; ++I)
7325     Ops.push_back(Op.getValue(I));
7326 
7327   return DAG.getMergeValues(Ops, SL);
7328 }
7329 
7330 /// \brief Populate a CallLoweringInfo (into \p CLI) based on the properties of
7331 /// the call being lowered.
7332 ///
7333 /// This is a helper for lowering intrinsics that follow a target calling
7334 /// convention or require stack pointer adjustment. Only a subset of the
7335 /// intrinsic's operands need to participate in the calling convention.
7336 void SelectionDAGBuilder::populateCallLoweringInfo(
7337     TargetLowering::CallLoweringInfo &CLI, ImmutableCallSite CS,
7338     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
7339     bool IsPatchPoint) {
7340   TargetLowering::ArgListTy Args;
7341   Args.reserve(NumArgs);
7342 
7343   // Populate the argument list.
7344   // Attributes for args start at offset 1, after the return attribute.
7345   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
7346        ArgI != ArgE; ++ArgI) {
7347     const Value *V = CS->getOperand(ArgI);
7348 
7349     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
7350 
7351     TargetLowering::ArgListEntry Entry;
7352     Entry.Node = getValue(V);
7353     Entry.Ty = V->getType();
7354     Entry.setAttributes(&CS, ArgIdx);
7355     Args.push_back(Entry);
7356   }
7357 
7358   CLI.setDebugLoc(getCurSDLoc())
7359       .setChain(getRoot())
7360       .setCallee(CS.getCallingConv(), ReturnTy, Callee, std::move(Args))
7361       .setDiscardResult(CS->use_empty())
7362       .setIsPatchPoint(IsPatchPoint);
7363 }
7364 
7365 /// \brief Add a stack map intrinsic call's live variable operands to a stackmap
7366 /// or patchpoint target node's operand list.
7367 ///
7368 /// Constants are converted to TargetConstants purely as an optimization to
7369 /// avoid constant materialization and register allocation.
7370 ///
7371 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
7372 /// generate address computation nodes, and so ExpandISelPseudo can convert the
7373 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
7374 /// address materialization and register allocation, but may also be required
7375 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
7376 /// alloca in the entry block, then the runtime may assume that the alloca's
7377 /// StackMap location can be read immediately after compilation and that the
7378 /// location is valid at any point during execution (this is similar to the
7379 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
7380 /// only available in a register, then the runtime would need to trap when
7381 /// execution reaches the StackMap in order to read the alloca's location.
7382 static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
7383                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
7384                                 SelectionDAGBuilder &Builder) {
7385   for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
7386     SDValue OpVal = Builder.getValue(CS.getArgument(i));
7387     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
7388       Ops.push_back(
7389         Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
7390       Ops.push_back(
7391         Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
7392     } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
7393       const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
7394       Ops.push_back(Builder.DAG.getTargetFrameIndex(
7395           FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
7396     } else
7397       Ops.push_back(OpVal);
7398   }
7399 }
7400 
7401 /// \brief Lower llvm.experimental.stackmap directly to its target opcode.
7402 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
7403   // void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
7404   //                                  [live variables...])
7405 
7406   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
7407 
7408   SDValue Chain, InFlag, Callee, NullPtr;
7409   SmallVector<SDValue, 32> Ops;
7410 
7411   SDLoc DL = getCurSDLoc();
7412   Callee = getValue(CI.getCalledValue());
7413   NullPtr = DAG.getIntPtrConstant(0, DL, true);
7414 
7415   // The stackmap intrinsic only records the live variables (the arguments
7416   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
7417   // intrinsic, this won't be lowered to a function call. This means we don't
7418   // have to worry about calling conventions and target specific lowering code.
7419   // Instead we perform the call lowering right here.
7420   //
7421   // chain, flag = CALLSEQ_START(chain, 0)
7422   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
7423   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
7424   //
7425   Chain = DAG.getCALLSEQ_START(getRoot(), NullPtr, DL);
7426   InFlag = Chain.getValue(1);
7427 
7428   // Add the <id> and <numBytes> constants.
7429   SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
7430   Ops.push_back(DAG.getTargetConstant(
7431                   cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
7432   SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
7433   Ops.push_back(DAG.getTargetConstant(
7434                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
7435                   MVT::i32));
7436 
7437   // Push live variables for the stack map.
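  // (They begin at operand 2, immediately after <id> and <numShadowBytes>.)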
7438   addStackMapLiveVars(&CI, 2, DL, Ops, *this);
7439 
7440   // We are not pushing any register mask info here on the operands list,
7441   // because the stackmap doesn't clobber anything.
7442 
7443   // Push the chain and the glue flag.
7444   Ops.push_back(Chain);
7445   Ops.push_back(InFlag);
7446 
7447   // Create the STACKMAP node.
7448   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7449   SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
7450   Chain = SDValue(SM, 0);
7451   InFlag = Chain.getValue(1);
7452 
7453   Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
7454 
7455   // Stackmaps don't generate values, so nothing goes into the NodeMap.
7456 
7457   // Set the root to the target-lowered call chain.
7458   DAG.setRoot(Chain);
7459 
7460   // Inform the Frame Information that we have a stackmap in this function.
7461   FuncInfo.MF->getFrameInfo().setHasStackMap();
7462 }
7463 
7464 /// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
7465 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
7466                                           const BasicBlock *EHPadBB) {
7467   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
7468   //                                                 i32 <numBytes>,
7469   //                                                 i8* <target>,
7470   //                                                 i32 <numArgs>,
7471   //                                                 [Args...],
7472   //                                                 [live variables...])
7473 
7474   CallingConv::ID CC = CS.getCallingConv();
7475   bool IsAnyRegCC = CC == CallingConv::AnyReg;
7476   bool HasDef = !CS->getType()->isVoidTy();
7477   SDLoc dl = getCurSDLoc();
7478   SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));
7479 
7480   // Handle immediate and symbolic callees.
7481   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
7482     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
7483                                    /*isTarget=*/true);
7484   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
7485     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
7486                                          SDLoc(SymbolicCallee),
7487                                          SymbolicCallee->getValueType(0));
7488 
7489   // Get the real number of arguments participating in the call, <numArgs>.
7490   SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
7491   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
7492 
7493   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
7494   // Intrinsics include all meta-operands up to but not including CC.
7495   unsigned NumMetaOpers = PatchPointOpers::CCPos;
7496   assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
7497          "Not enough arguments provided to the patchpoint intrinsic");
7498 
7499   // For AnyRegCC the arguments are lowered later on manually.
7500   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
7501   Type *ReturnTy =
7502     IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType();
7503 
7504   TargetLowering::CallLoweringInfo CLI(DAG);
7505   populateCallLoweringInfo(CLI, CS, NumMetaOpers, NumCallArgs, Callee, ReturnTy,
7506                            true);
7507   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
7508 
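  // If the call produced a value, the chain we got back ends in a
  // CopyFromReg; step over it to reach the CALLSEQ_END node.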
7509   SDNode *CallEnd = Result.second.getNode();
7510   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
7511     CallEnd = CallEnd->getOperand(0).getNode();
7512 
7513   /// Get a call instruction from the call sequence chain.
7514   /// Tail calls are not allowed.
7515   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
7516          "Expected a callseq node.");
7517   SDNode *Call = CallEnd->getOperand(0).getNode();
7518   bool HasGlue = Call->getGluedNode();
7519 
7520   // Replace the target specific call node with the patchable intrinsic.
7521   SmallVector<SDValue, 8> Ops;
7522 
7523   // Add the <id> and <numBytes> constants.
7524   SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
7525   Ops.push_back(DAG.getTargetConstant(
7526                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
7527   SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
7528   Ops.push_back(DAG.getTargetConstant(
7529                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
7530                   MVT::i32));
7531 
7532   // Add the callee.
7533   Ops.push_back(Callee);
7534 
7535   // Adjust <numArgs> to account for any arguments that have been passed on the
7536   // stack instead.
7537   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
7538   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
7539   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
7540   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
7541 
7542   // Add the calling convention
7543   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
7544 
7545   // Add the arguments we omitted previously. The register allocator should
7546   // place these in any free register.
7547   if (IsAnyRegCC)
7548     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
7549       Ops.push_back(getValue(CS.getArgument(i)));
7550 
7551   // Push the arguments from the call instruction up to the register mask.
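  // Per the layout above, operand 0 of the call is the chain and operand 1
  // the target, so the register arguments start at op_begin() + 2.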
7552   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
7553   Ops.append(Call->op_begin() + 2, e);
7554 
7555   // Push live variables for the stack map.
7556   addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);
7557 
7558   // Push the register mask info.
7559   if (HasGlue)
7560     Ops.push_back(*(Call->op_end()-2));
7561   else
7562     Ops.push_back(*(Call->op_end()-1));
7563 
7564   // Push the chain (this is originally the first operand of the call, but
7565   // now becomes the last or second-to-last operand).
7566   Ops.push_back(*(Call->op_begin()));
7567 
7568   // Push the glue flag (last operand).
7569   if (HasGlue)
7570     Ops.push_back(*(Call->op_end()-1));
7571 
7572   SDVTList NodeTys;
7573   if (IsAnyRegCC && HasDef) {
7574     // Create the return types based on the intrinsic definition
7575     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7576     SmallVector<EVT, 3> ValueVTs;
7577     ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
7578     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
7579 
7580     // There is always a chain and a glue type at the end
7581     ValueVTs.push_back(MVT::Other);
7582     ValueVTs.push_back(MVT::Glue);
7583     NodeTys = DAG.getVTList(ValueVTs);
7584   } else
7585     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7586 
7587   // Replace the target specific call node with a PATCHPOINT node.
7588   MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
7589                                          dl, NodeTys, Ops);
7590 
7591   // Update the NodeMap.
7592   if (HasDef) {
7593     if (IsAnyRegCC)
7594       setValue(CS.getInstruction(), SDValue(MN, 0));
7595     else
7596       setValue(CS.getInstruction(), Result.first);
7597   }
7598 
7599   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
7600   // call sequence. Furthermore the location of the chain and glue can change
7601   // when the AnyReg calling convention is used and the intrinsic returns a
7602   // value.
7603   if (IsAnyRegCC && HasDef) {
7604     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
7605     SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
7606     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
7607   } else
7608     DAG.ReplaceAllUsesWith(Call, MN);
7609   DAG.DeleteNode(Call);
7610 
7611   // Inform the Frame Information that we have a patchpoint in this function.
7612   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
7613 }
7614 
7615 /// Returns an AttributeList representing the attributes applied to the return
7616 /// value of the given call.
7617 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
7618   SmallVector<Attribute::AttrKind, 2> Attrs;
7619   if (CLI.RetSExt)
7620     Attrs.push_back(Attribute::SExt);
7621   if (CLI.RetZExt)
7622     Attrs.push_back(Attribute::ZExt);
7623   if (CLI.IsInReg)
7624     Attrs.push_back(Attribute::InReg);
7625 
7626   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
7627                             Attrs);
7628 }
7629 
7630 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
7631 /// implementation, which just calls LowerCall.
7632 /// FIXME: When all targets are
7633 /// migrated to using LowerCall, this hook should be integrated into SDISel.
7634 std::pair<SDValue, SDValue>
7635 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
7636   // Handle the incoming return values from the call.
7637   CLI.Ins.clear();
7638   Type *OrigRetTy = CLI.RetTy;
7639   SmallVector<EVT, 4> RetTys;
7640   SmallVector<uint64_t, 4> Offsets;
7641   auto &DL = CLI.DAG.getDataLayout();
7642   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
7643 
7644   SmallVector<ISD::OutputArg, 4> Outs;
7645   GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
7646 
7647   bool CanLowerReturn =
7648       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
7649                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
7650 
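  // Only meaningful when the return value must be demoted to an sret stack
  // slot below; -100 is just an obviously-invalid placeholder index.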
7651   SDValue DemoteStackSlot;
7652   int DemoteStackIdx = -100;
7653   if (!CanLowerReturn) {
7654     // FIXME: equivalent assert?
7655     // assert(!CS.hasInAllocaArgument() &&
7656     //        "sret demotion is incompatible with inalloca");
7657     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
7658     unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
7659     MachineFunction &MF = CLI.DAG.getMachineFunction();
7660     DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
7661     Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy);
7662 
7663     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
7664     ArgListEntry Entry;
7665     Entry.Node = DemoteStackSlot;
7666     Entry.Ty = StackSlotPtrType;
7667     Entry.IsSExt = false;
7668     Entry.IsZExt = false;
7669     Entry.IsInReg = false;
7670     Entry.IsSRet = true;
7671     Entry.IsNest = false;
7672     Entry.IsByVal = false;
7673     Entry.IsReturned = false;
7674     Entry.IsSwiftSelf = false;
7675     Entry.IsSwiftError = false;
7676     Entry.Alignment = Align;
7677     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
7678     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
7679 
7680     // sret demotion isn't compatible with tail-calls, since the sret argument
7681     // points into the callers stack frame.
7682     CLI.IsTailCall = false;
7683   } else {
7684     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
7685       EVT VT = RetTys[I];
7686       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
7687       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
7688       for (unsigned i = 0; i != NumRegs; ++i) {
7689         ISD::InputArg MyFlags;
7690         MyFlags.VT = RegisterVT;
7691         MyFlags.ArgVT = VT;
7692         MyFlags.Used = CLI.IsReturnValueUsed;
7693         if (CLI.RetSExt)
7694           MyFlags.Flags.setSExt();
7695         if (CLI.RetZExt)
7696           MyFlags.Flags.setZExt();
7697         if (CLI.IsInReg)
7698           MyFlags.Flags.setInReg();
7699         CLI.Ins.push_back(MyFlags);
7700       }
7701     }
7702   }
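  // Worked example for the !CanLowerReturn path above (illustrative, assuming
  // a 32-bit target that cannot return this type in registers): for
  //
  //   struct S { long long a, b, c, d; };
  //   struct S f(void);
  //
  // a 32-byte stack object is created, its address is prepended to the
  // argument list as a hidden sret pointer, and CLI.RetTy becomes void; the
  // call results are re-loaded from DemoteStackSlot after LowerCall returns.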
7703 
7704   // We push the swifterror return as the last element of CLI.Ins.
7705   ArgListTy &Args = CLI.getArgs();
7706   if (supportSwiftError()) {
7707     for (unsigned i = 0, e = Args.size(); i != e; ++i) {
7708       if (Args[i].IsSwiftError) {
7709         ISD::InputArg MyFlags;
7710         MyFlags.VT = getPointerTy(DL);
7711         MyFlags.ArgVT = EVT(getPointerTy(DL));
7712         MyFlags.Flags.setSwiftError();
7713         CLI.Ins.push_back(MyFlags);
7714       }
7715     }
7716   }
7717 
7718   // Handle all of the outgoing arguments.
7719   CLI.Outs.clear();
7720   CLI.OutVals.clear();
7721   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
7722     SmallVector<EVT, 4> ValueVTs;
7723     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
7724     Type *FinalType = Args[i].Ty;
7725     if (Args[i].IsByVal)
7726       FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
7727     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
7728         FinalType, CLI.CallConv, CLI.IsVarArg);
7729     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
7730          ++Value) {
7731       EVT VT = ValueVTs[Value];
7732       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
7733       SDValue Op = SDValue(Args[i].Node.getNode(),
7734                            Args[i].Node.getResNo() + Value);
7735       ISD::ArgFlagsTy Flags;
7736       unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
7737 
7738       if (Args[i].IsZExt)
7739         Flags.setZExt();
7740       if (Args[i].IsSExt)
7741         Flags.setSExt();
7742       if (Args[i].IsInReg) {
7743         // Under the vectorcall calling convention, a structure that is
7744         // passed InReg is surely a homogeneous vector aggregate (HVA).
7745         if (CLI.CallConv == CallingConv::X86_VectorCall &&
7746             isa<StructType>(FinalType)) {
7747           // The first value of the structure is marked with HvaStart.
7748           if (0 == Value)
7749             Flags.setHvaStart();
7750           Flags.setHva();
7751         }
7752         // Set InReg Flag
7753         Flags.setInReg();
7754       }
7755       if (Args[i].IsSRet)
7756         Flags.setSRet();
7757       if (Args[i].IsSwiftSelf)
7758         Flags.setSwiftSelf();
7759       if (Args[i].IsSwiftError)
7760         Flags.setSwiftError();
7761       if (Args[i].IsByVal)
7762         Flags.setByVal();
7763       if (Args[i].IsInAlloca) {
7764         Flags.setInAlloca();
7765         // Set the byval flag for CCAssignFn callbacks that don't know about
7766         // inalloca.  This way we can know how many bytes we should've allocated
7767         // and how many bytes a callee cleanup function will pop.  If we port
7768         // inalloca to more targets, we'll have to add custom inalloca handling
7769         // in the various CC lowering callbacks.
7770         Flags.setByVal();
7771       }
7772       if (Args[i].IsByVal || Args[i].IsInAlloca) {
7773         PointerType *Ty = cast<PointerType>(Args[i].Ty);
7774         Type *ElementTy = Ty->getElementType();
7775         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
7776         // For ByVal, the alignment should come from the frontend.  The backend
7777         // will guess if this info is missing, but there are cases it cannot get right.
7778         unsigned FrameAlign;
7779         if (Args[i].Alignment)
7780           FrameAlign = Args[i].Alignment;
7781         else
7782           FrameAlign = getByValTypeAlignment(ElementTy, DL);
7783         Flags.setByValAlign(FrameAlign);
7784       }
7785       if (Args[i].IsNest)
7786         Flags.setNest();
7787       if (NeedsRegBlock)
7788         Flags.setInConsecutiveRegs();
7789       Flags.setOrigAlign(OriginalAlignment);
7790 
7791       MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
7792       unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
7793       SmallVector<SDValue, 4> Parts(NumParts);
7794       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
7795 
7796       if (Args[i].IsSExt)
7797         ExtendKind = ISD::SIGN_EXTEND;
7798       else if (Args[i].IsZExt)
7799         ExtendKind = ISD::ZERO_EXTEND;
7800 
7801       // Conservatively only handle 'returned' on non-vectors for now
7802       if (Args[i].IsReturned && !Op.getValueType().isVector()) {
7803         assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
7804                "unexpected use of 'returned'");
7805         // Before passing 'returned' to the target lowering code, ensure that
7806         // either the register MVT and the actual EVT are the same size or that
7807         // the return value and argument are extended in the same way; in these
7808         // cases it's safe to pass the argument register value unchanged as the
7809         // return register value (although it's at the target's option whether
7810         // to do so)
7811         // TODO: allow code generation to take advantage of partially preserved
7812         // registers rather than clobbering the entire register when the
7813         // parameter extension method is not compatible with the return
7814         // extension method
7815         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
7816             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
7817              CLI.RetZExt == Args[i].IsZExt))
7818           Flags.setReturned();
7819       }
7820 
7821       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
7822                      CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
7823 
7824       for (unsigned j = 0; j != NumParts; ++j) {
7825         // If this isn't the first piece, the alignment must be 1.
7826         ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
7827                                i < CLI.NumFixedArgs,
7828                                i, j*Parts[j].getValueType().getStoreSize());
7829         if (NumParts > 1 && j == 0)
7830           MyFlags.Flags.setSplit();
7831         else if (j != 0) {
7832           MyFlags.Flags.setOrigAlign(1);
7833           if (j == NumParts - 1)
7834             MyFlags.Flags.setSplitEnd();
7835         }
7836 
7837         CLI.Outs.push_back(MyFlags);
7838         CLI.OutVals.push_back(Parts[j]);
7839       }
7840 
7841       if (NeedsRegBlock && Value == NumValues - 1)
7842         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
7843     }
7844   }
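  // Worked example (illustrative): an i64 argument on a 32-bit target splits
  // into NumParts == 2 parts of MVT::i32, so the loop above pushes two
  // ISD::OutputArg entries: the first is marked with setSplit(), the second
  // gets setOrigAlign(1) and, being the last part, setSplitEnd().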
7845 
7846   SmallVector<SDValue, 4> InVals;
7847   CLI.Chain = LowerCall(CLI, InVals);
7848 
7849   // Update CLI.InVals for use outside of this function.
7850   CLI.InVals = InVals;
7851 
7852   // Verify that the target's LowerCall behaved as expected.
7853   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
7854          "LowerCall didn't return a valid chain!");
7855   assert((!CLI.IsTailCall || InVals.empty()) &&
7856          "LowerCall emitted a return value for a tail call!");
7857   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
7858          "LowerCall didn't emit the correct number of values!");
7859 
7860   // For a tail call, the return value is merely live-out and there aren't
7861   // any nodes in the DAG representing it. Return a special value to
7862   // indicate that a tail call has been emitted and no more Instructions
7863   // should be processed in the current block.
7864   if (CLI.IsTailCall) {
7865     CLI.DAG.setRoot(CLI.Chain);
7866     return std::make_pair(SDValue(), SDValue());
7867   }
7868 
7869 #ifndef NDEBUG
7870   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
7871     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
7872     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
7873            "LowerCall emitted a value with the wrong type!");
7874   }
7875 #endif
7876 
7877   SmallVector<SDValue, 4> ReturnValues;
7878   if (!CanLowerReturn) {
7879     // The instruction result is the result of loading from the
7880     // hidden sret parameter.
7881     SmallVector<EVT, 1> PVTs;
7882     Type *PtrRetTy = PointerType::getUnqual(OrigRetTy);
7883 
7884     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
7885     assert(PVTs.size() == 1 && "Pointers should fit in one register");
7886     EVT PtrVT = PVTs[0];
7887 
7888     unsigned NumValues = RetTys.size();
7889     ReturnValues.resize(NumValues);
7890     SmallVector<SDValue, 4> Chains(NumValues);
7891 
7892     // An aggregate return value cannot wrap around the address space, so
7893     // offsets to its parts don't wrap either.
7894     SDNodeFlags Flags;
7895     Flags.setNoUnsignedWrap(true);
7896 
7897     for (unsigned i = 0; i < NumValues; ++i) {
7898       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
7899                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
7900                                                         PtrVT), &Flags);
7901       SDValue L = CLI.DAG.getLoad(
7902           RetTys[i], CLI.DL, CLI.Chain, Add,
7903           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
7904                                             DemoteStackIdx, Offsets[i]),
7905           /* Alignment = */ 1);
7906       ReturnValues[i] = L;
7907       Chains[i] = L.getValue(1);
7908     }
7909 
7910     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
7911   } else {
7912     // Collect the legal value parts into potentially illegal values
7913     // that correspond to the original function's return values.
7914     Optional<ISD::NodeType> AssertOp;
7915     if (CLI.RetSExt)
7916       AssertOp = ISD::AssertSext;
7917     else if (CLI.RetZExt)
7918       AssertOp = ISD::AssertZext;
7919     unsigned CurReg = 0;
7920     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
7921       EVT VT = RetTys[I];
7922       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
7923       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
7924 
7925       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
7926                                               NumRegs, RegisterVT, VT, nullptr,
7927                                               AssertOp));
7928       CurReg += NumRegs;
7929     }
7930 
7931     // For a function returning void, there is no return value. We can't create
7932     // such a node, so we just return a null return value in that case;
7933     // nothing will actually look at it.
7934     if (ReturnValues.empty())
7935       return std::make_pair(SDValue(), CLI.Chain);
7936   }
7937 
7938   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
7939                                 CLI.DAG.getVTList(RetTys), ReturnValues);
7940   return std::make_pair(Res, CLI.Chain);
7941 }
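// Illustrative caller sketch (hypothetical locals; in-tree targets normally
// reach LowerCallTo through SelectionDAGBuilder::lowerCallTo):
//
//   TargetLowering::CallLoweringInfo CLI(DAG);
//   CLI.setDebugLoc(dl).setChain(Chain)
//      .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
//   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
//   // Result.first is the merged return value (null for tail calls and void
//   // returns); Result.second is the updated chain.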
7942 
7943 void TargetLowering::LowerOperationWrapper(SDNode *N,
7944                                            SmallVectorImpl<SDValue> &Results,
7945                                            SelectionDAG &DAG) const {
7946   if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
7947     Results.push_back(Res);
7948 }
7949 
7950 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
7951   llvm_unreachable("LowerOperation not implemented for this target!");
7952 }
7953 
7954 void
7955 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
7956   SDValue Op = getNonRegisterValue(V);
7957   assert((Op.getOpcode() != ISD::CopyFromReg ||
7958           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
7959          "Copy from a reg to the same reg!");
7960   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
7961 
7962   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7963   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
7964                    V->getType());
7965   SDValue Chain = DAG.getEntryNode();
7966 
7967   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
7968                               FuncInfo.PreferredExtendType.end())
7969                                  ? ISD::ANY_EXTEND
7970                                  : FuncInfo.PreferredExtendType[V];
7971   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
7972   PendingExports.push_back(Chain);
7973 }
7974 
7975 #include "llvm/CodeGen/SelectionDAGISel.h"
7976 
7977 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
7978 /// entry block, return true.  This includes arguments used by switches, since
7979 /// the switch may expand into multiple basic blocks.
7980 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
7981   // With FastISel active, we may be splitting blocks, so force creation
7982   // of virtual registers for all non-dead arguments.
7983   if (FastISel)
7984     return A->use_empty();
7985 
7986   const BasicBlock &Entry = A->getParent()->front();
7987   for (const User *U : A->users())
7988     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
7989       return false;  // Use not in entry block.
7990 
7991   return true;
7992 }
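// Example (illustrative): an argument consumed only by a `switch` in the
// entry block still makes this return false, because switch lowering may
// expand into several machine basic blocks that all need the value.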
7993 
7994 typedef DenseMap<const Argument *,
7995                  std::pair<const AllocaInst *, const StoreInst *>>
7996     ArgCopyElisionMapTy;
7997 
7998 /// Scan the entry block of the function in FuncInfo for arguments that look
7999 /// like copies into a local alloca. Record any copied arguments in
8000 /// ArgCopyElisionCandidates.
8001 static void
8002 findArgumentCopyElisionCandidates(const DataLayout &DL,
8003                                   FunctionLoweringInfo *FuncInfo,
8004                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
8005   // Record the state of every static alloca used in the entry block. Argument
8006   // allocas are all used in the entry block, so we need approximately as many
8007   // entries as we have arguments.
8008   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
8009   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
8010   unsigned NumArgs = FuncInfo->Fn->arg_size();
8011   StaticAllocas.reserve(NumArgs * 2);
8012 
8013   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
8014     if (!V)
8015       return nullptr;
8016     V = V->stripPointerCasts();
8017     const auto *AI = dyn_cast<AllocaInst>(V);
8018     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
8019       return nullptr;
8020     auto Iter = StaticAllocas.insert({AI, Unknown});
8021     return &Iter.first->second;
8022   };
8023 
8024   // Look for stores of arguments to static allocas. Look through bitcasts and
8025   // GEPs to handle type coercions, as long as the alloca is fully initialized
8026   // by the store. Any non-store use of an alloca escapes it and any subsequent
8027   // unanalyzed store might write it.
8028   // FIXME: Handle structs initialized with multiple stores.
8029   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
8030     // Look for stores, and handle non-store uses conservatively.
8031     const auto *SI = dyn_cast<StoreInst>(&I);
8032     if (!SI) {
8033       // We will look through cast uses, so ignore them completely.
8034       if (I.isCast())
8035         continue;
8036       // Ignore debug info intrinsics, they don't escape or store to allocas.
8037       if (isa<DbgInfoIntrinsic>(I))
8038         continue;
8039       // This is an unknown instruction. Assume it escapes or writes to all
8040       // static alloca operands.
8041       for (const Use &U : I.operands()) {
8042         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
8043           *Info = StaticAllocaInfo::Clobbered;
8044       }
8045       continue;
8046     }
8047 
8048     // If the stored value is a static alloca, mark it as escaped.
8049     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
8050       *Info = StaticAllocaInfo::Clobbered;
8051 
8052     // Check if the destination is a static alloca.
8053     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
8054     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
8055     if (!Info)
8056       continue;
8057     const AllocaInst *AI = cast<AllocaInst>(Dst);
8058 
8059     // Skip allocas that have been initialized or clobbered.
8060     if (*Info != StaticAllocaInfo::Unknown)
8061       continue;
8062 
8063     // Check if the stored value is an argument, and that this store fully
8064     // initializes the alloca. Don't elide copies from the same argument twice.
8065     const Value *Val = SI->getValueOperand()->stripPointerCasts();
8066     const auto *Arg = dyn_cast<Argument>(Val);
8067     if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() ||
8068         Arg->getType()->isEmptyTy() ||
8069         DL.getTypeStoreSize(Arg->getType()) !=
8070             DL.getTypeAllocSize(AI->getAllocatedType()) ||
8071         ArgCopyElisionCandidates.count(Arg)) {
8072       *Info = StaticAllocaInfo::Clobbered;
8073       continue;
8074     }
8075 
8076     DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI << '\n');
8077 
8078     // Mark this alloca and store for argument copy elision.
8079     *Info = StaticAllocaInfo::Elidable;
8080     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
8081 
8082     // Stop scanning if we've seen all arguments. This will happen early in -O0
8083     // builds, which is useful, because -O0 builds have large entry blocks and
8084     // many allocas.
8085     if (ArgCopyElisionCandidates.size() == NumArgs)
8086       break;
8087   }
8088 }
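// Example of the pattern recognized above (illustrative): for C source like
//
//   void g(struct S s) { struct S local = s; /* ... */ }
//
// frontends typically emit an entry block with `%local = alloca %struct.S`
// followed by a store of the argument into it; that (alloca, store) pair is
// recorded as an elision candidate, provided nothing clobbers %local first.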
8089 
8090 /// Try to elide argument copies from memory into a local alloca. Succeeds if
8091 /// ArgVal is a load from a suitable fixed stack object.
8092 static void tryToElideArgumentCopy(
8093     FunctionLoweringInfo *FuncInfo, SmallVectorImpl<SDValue> &Chains,
8094     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
8095     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
8096     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
8097     SDValue ArgVal, bool &ArgHasUses) {
8098   // Check if this is a load from a fixed stack object.
8099   auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
8100   if (!LNode)
8101     return;
8102   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
8103   if (!FINode)
8104     return;
8105 
8106   // Check that the fixed stack object is the right size and alignment.
8107   // Look at the alignment that the user wrote on the alloca instead of looking
8108   // at the stack object.
8109   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
8110   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
8111   const AllocaInst *AI = ArgCopyIter->second.first;
8112   int FixedIndex = FINode->getIndex();
8113   int &AllocaIndex = FuncInfo->StaticAllocaMap[AI];
8114   int OldIndex = AllocaIndex;
8115   MachineFrameInfo &MFI = FuncInfo->MF->getFrameInfo();
8116   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
8117     DEBUG(dbgs() << "  argument copy elision failed due to bad fixed stack "
8118                     "object size\n");
8119     return;
8120   }
8121   unsigned RequiredAlignment = AI->getAlignment();
8122   if (!RequiredAlignment) {
8123     RequiredAlignment = FuncInfo->MF->getDataLayout().getABITypeAlignment(
8124         AI->getAllocatedType());
8125   }
8126   if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
8127     DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
8128                     "greater than stack argument alignment ("
8129                  << RequiredAlignment << " vs "
8130                  << MFI.getObjectAlignment(FixedIndex) << ")\n");
8131     return;
8132   }
8133 
8134   // Perform the elision. Delete the old stack object and replace its only use
8135   // in the variable info map. Mark the stack object as mutable.
8136   DEBUG({
8137     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
8138            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
8139            << '\n';
8140   });
8141   MFI.RemoveStackObject(OldIndex);
8142   MFI.setIsImmutableObjectIndex(FixedIndex, false);
8143   AllocaIndex = FixedIndex;
8144   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
8145   Chains.push_back(ArgVal.getValue(1));
8146 
8147   // Avoid emitting code for the store implementing the copy.
8148   const StoreInst *SI = ArgCopyIter->second.second;
8149   ElidedArgCopyInstrs.insert(SI);
8150 
8151   // Check for uses of the argument again so that we can avoid exporting ArgVal
8152   // if it isn't used by anything other than the store.
8153   for (const Value *U : Arg.users()) {
8154     if (U != SI) {
8155       ArgHasUses = true;
8156       break;
8157     }
8158   }
8159 }
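// Example (illustrative frame indices): if the target lowered the argument as
// a load from fixed stack object -1 and the elidable alloca was frame index
// 2, the code above retires index 2, points StaticAllocaMap at -1, and
// records {2, -1} in ArgCopyElisionFrameIndexMap so that stale dbg.declare
// slots can be remapped after argument lowering.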
8160 
8161 void SelectionDAGISel::LowerArguments(const Function &F) {
8162   SelectionDAG &DAG = SDB->DAG;
8163   SDLoc dl = SDB->getCurSDLoc();
8164   const DataLayout &DL = DAG.getDataLayout();
8165   SmallVector<ISD::InputArg, 16> Ins;
8166 
8167   if (!FuncInfo->CanLowerReturn) {
8168     // Put in an sret pointer parameter before all the other parameters.
8169     SmallVector<EVT, 1> ValueVTs;
8170     ComputeValueVTs(*TLI, DAG.getDataLayout(),
8171                     PointerType::getUnqual(F.getReturnType()), ValueVTs);
8172 
8173     // NOTE: Assuming that a pointer will never break down to more than one VT
8174     // or more than one register.
8175     ISD::ArgFlagsTy Flags;
8176     Flags.setSRet();
8177     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
8178     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
8179                          ISD::InputArg::NoArgIndex, 0);
8180     Ins.push_back(RetArg);
8181   }
8182 
8183   // Look for stores of arguments to static allocas. Mark such arguments with a
8184   // flag to ask the target to give us the memory location of that argument if
8185   // available.
8186   ArgCopyElisionMapTy ArgCopyElisionCandidates;
8187   findArgumentCopyElisionCandidates(DL, FuncInfo, ArgCopyElisionCandidates);
8188 
8189   // Set up the incoming argument description vector.
8190   unsigned Idx = 0;
8191   for (const Argument &Arg : F.args()) {
8192     ++Idx;
8193     SmallVector<EVT, 4> ValueVTs;
8194     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
8195     bool isArgValueUsed = !Arg.use_empty();
8196     unsigned PartBase = 0;
8197     Type *FinalType = Arg.getType();
8198     if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
8199       FinalType = cast<PointerType>(FinalType)->getElementType();
8200     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
8201         FinalType, F.getCallingConv(), F.isVarArg());
8202     for (unsigned Value = 0, NumValues = ValueVTs.size();
8203          Value != NumValues; ++Value) {
8204       EVT VT = ValueVTs[Value];
8205       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
8206       ISD::ArgFlagsTy Flags;
8207       unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
8208 
8209       if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
8210         Flags.setZExt();
8211       if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
8212         Flags.setSExt();
8213       if (F.getAttributes().hasAttribute(Idx, Attribute::InReg)) {
8214         // Under the vectorcall calling convention, a structure that is
8215         // passed InReg is surely a homogeneous vector aggregate (HVA).
8216         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
8217             isa<StructType>(Arg.getType())) {
8218           // The first value of the structure is marked with HvaStart.
8219           if (0 == Value)
8220             Flags.setHvaStart();
8221           Flags.setHva();
8222         }
8223         // Set InReg Flag
8224         Flags.setInReg();
8225       }
8226       if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet))
8227         Flags.setSRet();
8228       if (F.getAttributes().hasAttribute(Idx, Attribute::SwiftSelf))
8229         Flags.setSwiftSelf();
8230       if (F.getAttributes().hasAttribute(Idx, Attribute::SwiftError))
8231         Flags.setSwiftError();
8232       if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
8233         Flags.setByVal();
8234       if (F.getAttributes().hasAttribute(Idx, Attribute::InAlloca)) {
8235         Flags.setInAlloca();
8236         // Set the byval flag for CCAssignFn callbacks that don't know about
8237         // inalloca.  This way we can know how many bytes we should've allocated
8238         // and how many bytes a callee cleanup function will pop.  If we port
8239         // inalloca to more targets, we'll have to add custom inalloca handling
8240         // in the various CC lowering callbacks.
8241         Flags.setByVal();
8242       }
8243       if (F.getCallingConv() == CallingConv::X86_INTR) {
8244         // The x86 interrupt convention passes the frame (1st parameter) by value on the stack.
8245         if (Idx == 1)
8246           Flags.setByVal();
8247       }
8248       if (Flags.isByVal() || Flags.isInAlloca()) {
8249         PointerType *Ty = cast<PointerType>(Arg.getType());
8250         Type *ElementTy = Ty->getElementType();
8251         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
8252         // For ByVal, the alignment should be passed from the frontend.  The backend
8253         // will guess if this info is missing, but there are cases it cannot get right.
8254         unsigned FrameAlign;
8255         if (F.getParamAlignment(Idx))
8256           FrameAlign = F.getParamAlignment(Idx);
8257         else
8258           FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
8259         Flags.setByValAlign(FrameAlign);
8260       }
8261       if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
8262         Flags.setNest();
8263       if (NeedsRegBlock)
8264         Flags.setInConsecutiveRegs();
8265       Flags.setOrigAlign(OriginalAlignment);
8266       if (ArgCopyElisionCandidates.count(&Arg))
8267         Flags.setCopyElisionCandidate();
8268 
8269       MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
8270       unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
8271       for (unsigned i = 0; i != NumRegs; ++i) {
8272         ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
8273                               Idx-1, PartBase+i*RegisterVT.getStoreSize());
8274         if (NumRegs > 1 && i == 0)
8275           MyFlags.Flags.setSplit();
8276         // If this isn't the first piece, the alignment must be 1.
8277         else if (i > 0) {
8278           MyFlags.Flags.setOrigAlign(1);
8279           if (i == NumRegs - 1)
8280             MyFlags.Flags.setSplitEnd();
8281         }
8282         Ins.push_back(MyFlags);
8283       }
8284       if (NeedsRegBlock && Value == NumValues - 1)
8285         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
8286       PartBase += VT.getStoreSize();
8287     }
8288   }
8289 
8290   // Call the target to set up the argument values.
8291   SmallVector<SDValue, 8> InVals;
8292   SDValue NewRoot = TLI->LowerFormalArguments(
8293       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
8294 
8295   // Verify that the target's LowerFormalArguments behaved as expected.
8296   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
8297          "LowerFormalArguments didn't return a valid chain!");
8298   assert(InVals.size() == Ins.size() &&
8299          "LowerFormalArguments didn't emit the correct number of values!");
8300   DEBUG({
8301       for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
8302         assert(InVals[i].getNode() &&
8303                "LowerFormalArguments emitted a null value!");
8304         assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
8305                "LowerFormalArguments emitted a value with the wrong type!");
8306       }
8307     });
8308 
8309   // Update the DAG with the new chain value resulting from argument lowering.
8310   DAG.setRoot(NewRoot);
8311 
8312   // Set up the argument values.
8313   unsigned i = 0;
8314   Idx = 0;
8315   if (!FuncInfo->CanLowerReturn) {
8316     // Create a virtual register for the sret pointer, and put in a copy
8317     // from the sret argument into it.
8318     SmallVector<EVT, 1> ValueVTs;
8319     ComputeValueVTs(*TLI, DAG.getDataLayout(),
8320                     PointerType::getUnqual(F.getReturnType()), ValueVTs);
8321     MVT VT = ValueVTs[0].getSimpleVT();
8322     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
8323     Optional<ISD::NodeType> AssertOp = None;
8324     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
8325                                         RegVT, VT, nullptr, AssertOp);
8326 
8327     MachineFunction& MF = SDB->DAG.getMachineFunction();
8328     MachineRegisterInfo& RegInfo = MF.getRegInfo();
8329     unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
8330     FuncInfo->DemoteRegister = SRetReg;
8331     NewRoot =
8332         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
8333     DAG.setRoot(NewRoot);
8334 
8335     // i indexes lowered arguments.  Bump it past the hidden sret argument.
8336     // Idx indexes LLVM arguments.  Don't touch it.
8337     ++i;
8338   }
8339 
8340   SmallVector<SDValue, 4> Chains;
8341   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
8342   for (const Argument &Arg : F.args()) {
8343     ++Idx;
8344     SmallVector<SDValue, 4> ArgValues;
8345     SmallVector<EVT, 4> ValueVTs;
8346     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
8347     unsigned NumValues = ValueVTs.size();
8348     if (NumValues == 0)
8349       continue;
8350 
8351     bool ArgHasUses = !Arg.use_empty();
8352 
8353     // Elide the copying store if the target loaded this argument from a
8354     // suitable fixed stack object.
8355     if (Ins[i].Flags.isCopyElisionCandidate()) {
8356       tryToElideArgumentCopy(FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
8357                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
8358                              InVals[i], ArgHasUses);
8359     }
8360 
8361     // If this argument is unused, remember its value anyway; it is used to
8362     // generate debugging information.
8363     bool isSwiftErrorArg =
8364         TLI->supportSwiftError() &&
8365         F.getAttributes().hasAttribute(Idx, Attribute::SwiftError);
8366     if (!ArgHasUses && !isSwiftErrorArg) {
8367       SDB->setUnusedArgValue(&Arg, InVals[i]);
8368 
8369       // Also remember any frame index for use in FastISel.
8370       if (FrameIndexSDNode *FI =
8371           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
8372         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8373     }
8374 
8375     for (unsigned Val = 0; Val != NumValues; ++Val) {
8376       EVT VT = ValueVTs[Val];
8377       MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
8378       unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT);
8379 
8380       // Even an apparently unused swifterror argument needs to be returned. So
8381       // we do generate a copy for it that can be used on return from the
8382       // function.
8383       if (ArgHasUses || isSwiftErrorArg) {
8384         Optional<ISD::NodeType> AssertOp;
8385         if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
8386           AssertOp = ISD::AssertSext;
8387         else if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
8388           AssertOp = ISD::AssertZext;
8389 
8390         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
8391                                              PartVT, VT, nullptr, AssertOp));
8392       }
8393 
8394       i += NumParts;
8395     }
8396 
8397     // We don't need to do anything else for unused arguments.
8398     if (ArgValues.empty())
8399       continue;
8400 
8401     // Note down frame index.
8402     if (FrameIndexSDNode *FI =
8403         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
8404       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8405 
8406     SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
8407                                      SDB->getCurSDLoc());
8408 
8409     SDB->setValue(&Arg, Res);
8410     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
8411       if (LoadSDNode *LNode =
8412           dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
8413         if (FrameIndexSDNode *FI =
8414             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
8415           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8416     }
8417 
8418     // Update the SwiftErrorVRegDefMap.
8419     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
8420       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
8421       if (TargetRegisterInfo::isVirtualRegister(Reg))
8422         FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB,
8423                                            FuncInfo->SwiftErrorArg, Reg);
8424     }
8425 
8426     // If this argument is live outside of the entry block, insert a copy from
8427     // wherever we got it to the vreg that other BBs will reference it by.
8428     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
8429       // If we can, though, try to skip creating an unnecessary vreg.
8430       // FIXME: This isn't very clean... it would be nice to make this more
8431       // general.  It's also subtly incompatible with the hacks FastISel
8432       // uses with vregs.
8433       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
8434       if (TargetRegisterInfo::isVirtualRegister(Reg)) {
8435         FuncInfo->ValueMap[&Arg] = Reg;
8436         continue;
8437       }
8438     }
8439     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
8440       FuncInfo->InitializeRegForValue(&Arg);
8441       SDB->CopyToExportRegsIfNeeded(&Arg);
8442     }
8443   }
8444 
8445   if (!Chains.empty()) {
8446     Chains.push_back(NewRoot);
8447     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
8448   }
8449 
8450   DAG.setRoot(NewRoot);
8451 
8452   assert(i == InVals.size() && "Argument register count mismatch!");
8453 
8454   // If any argument copy elisions occurred and we have debug info, update the
8455   // stale frame indices used in the dbg.declare variable info table.
8456   MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
8457   if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
8458     for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
8459       auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
8460       if (I != ArgCopyElisionFrameIndexMap.end())
8461         VI.Slot = I->second;
8462     }
8463   }
8464 
8465   // Finally, if the target has anything special to do, allow it to do so.
8466   EmitFunctionEntryCode();
8467 }
8468 
8469 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
8470 /// ensure constants are generated when needed.  Remember the virtual registers
8471 /// that need to be added to the Machine PHI nodes as input.  We cannot just
8472 /// directly add them, because expansion might result in multiple MBB's for one
8473 /// BB.  As such, the start of the BB might correspond to a different MBB than
8474 /// the end.
8475 ///
8476 void
8477 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
8478   const TerminatorInst *TI = LLVMBB->getTerminator();
8479 
8480   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
8481 
8482   // Check PHI nodes in successors that expect a value to be available from this
8483   // block.
8484   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
8485     const BasicBlock *SuccBB = TI->getSuccessor(succ);
8486     if (!isa<PHINode>(SuccBB->begin())) continue;
8487     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
8488 
8489     // If this terminator has multiple identical successors (common for
8490     // switches), only handle each succ once.
8491     if (!SuccsHandled.insert(SuccMBB).second)
8492       continue;
8493 
8494     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
8495 
8496     // At this point we know that there is a 1-1 correspondence between LLVM PHI
8497     // nodes and Machine PHI nodes, but the incoming operands have not been
8498     // emitted yet.
8499     for (BasicBlock::const_iterator I = SuccBB->begin();
8500          const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
8501       // Ignore dead PHIs.
8502       if (PN->use_empty()) continue;
8503 
8504       // Skip empty types
8505       if (PN->getType()->isEmptyTy())
8506         continue;
8507 
8508       unsigned Reg;
8509       const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
8510 
8511       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
8512         unsigned &RegOut = ConstantsOut[C];
8513         if (RegOut == 0) {
8514           RegOut = FuncInfo.CreateRegs(C->getType());
8515           CopyValueToVirtualRegister(C, RegOut);
8516         }
8517         Reg = RegOut;
8518       } else {
8519         DenseMap<const Value *, unsigned>::iterator I =
8520           FuncInfo.ValueMap.find(PHIOp);
8521         if (I != FuncInfo.ValueMap.end())
8522           Reg = I->second;
8523         else {
8524           assert(isa<AllocaInst>(PHIOp) &&
8525                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
8526                  "Didn't codegen value into a register!??");
8527           Reg = FuncInfo.CreateRegs(PHIOp->getType());
8528           CopyValueToVirtualRegister(PHIOp, Reg);
8529         }
8530       }
8531 
8532       // Remember that this register needs to be added to the machine PHI node
8533       // as the input for this MBB.
8534       SmallVector<EVT, 4> ValueVTs;
8535       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8536       ComputeValueVTs(TLI, DAG.getDataLayout(), PN->getType(), ValueVTs);
8537       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
8538         EVT VT = ValueVTs[vti];
8539         unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
8540         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
8541           FuncInfo.PHINodesToUpdate.push_back(
8542               std::make_pair(&*MBBI++, Reg + i));
8543         Reg += NumRegisters;
8544       }
8545     }
8546   }
8547 
8548   ConstantsOut.clear();
8549 }
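// Worked example (illustrative): a PHI of type i64 on a 32-bit target needs
// two i32 registers, so the loop above appends two PHINodesToUpdate entries,
// one per machine PHI node, at consecutive register numbers Reg and Reg + 1.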
8550 
8551 /// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
8552 /// if SuccMBB is null.
8553 MachineBasicBlock *
8554 SelectionDAGBuilder::StackProtectorDescriptor::
8555 AddSuccessorMBB(const BasicBlock *BB,
8556                 MachineBasicBlock *ParentMBB,
8557                 bool IsLikely,
8558                 MachineBasicBlock *SuccMBB) {
8559   // If SuccMBB has not been created yet, create it.
8560   if (!SuccMBB) {
8561     MachineFunction *MF = ParentMBB->getParent();
8562     MachineFunction::iterator BBI(ParentMBB);
8563     SuccMBB = MF->CreateMachineBasicBlock(BB);
8564     MF->insert(++BBI, SuccMBB);
8565   }
8566   // Add it as a successor of ParentMBB.
8567   ParentMBB->addSuccessor(
8568       SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
8569   return SuccMBB;
8570 }
8571 
8572 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
8573   MachineFunction::iterator I(MBB);
8574   if (++I == FuncInfo.MF->end())
8575     return nullptr;
8576   return &*I;
8577 }
8578 
8579 /// During lowering new call nodes can be created (such as memset, etc.).
8580 /// Those will become new roots of the current DAG, but complications arise
8581 /// when they are tail calls. In such cases, the call lowering will update
8582 /// the root, but the builder still needs to know that a tail call has been
8583 /// lowered in order to avoid generating an additional return.
8584 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
8585   // If the node is null, the call was lowered as a tail call.
8586   if (MaybeTC.getNode() != nullptr)
8587     DAG.setRoot(MaybeTC);
8588   else
8589     HasTailCall = true;
8590 }
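// Typical use (sketch; see the memcpy intrinsic lowering elsewhere in this
// file, where isTC reflects a possible tail call):
//
//   SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Align, isVol,
//                              false, isTC, DstPtrInfo, SrcPtrInfo);
//   updateDAGForMaybeTailCall(MC); // a null MC means a tail call was made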
8591 
8592 bool SelectionDAGBuilder::isDense(const CaseClusterVector &Clusters,
8593                                   const SmallVectorImpl<unsigned> &TotalCases,
8594                                   unsigned First, unsigned Last,
8595                                   unsigned Density) const {
8596   assert(Last >= First);
8597   assert(TotalCases[Last] >= TotalCases[First]);
8598 
8599   const APInt &LowCase = Clusters[First].Low->getValue();
8600   const APInt &HighCase = Clusters[Last].High->getValue();
8601   assert(LowCase.getBitWidth() == HighCase.getBitWidth());
8602 
8603   // FIXME: A range of consecutive cases has 100% density, but only requires one
8604   // comparison to lower. We should discriminate against such consecutive ranges
8605   // in jump tables.
8606 
8607   uint64_t Diff = (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100);
8608   uint64_t Range = Diff + 1;
8609 
8610   uint64_t NumCases =
8611       TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
8612 
8613   assert(NumCases < UINT64_MAX / 100);
8614   assert(Range >= NumCases);
8615 
8616   return NumCases * 100 >= Range * Density;
8617 }
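// Worked example (illustrative): for case values {0, 1, 2, 100}, Range is
// 101 and NumCases is 4, roughly 4% density. With Density == 40, the check
// `4 * 100 >= 101 * 40` fails, so the whole range is not dense enough for a
// single jump table.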
8618 
8619 static inline bool areJTsAllowed(const TargetLowering &TLI,
8620                                  const SwitchInst *SI) {
8621   const Function *Fn = SI->getParent()->getParent();
8622   if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
8623     return false;
8624 
8625   return TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
8626          TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
8627 }
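// Note: clang's -fno-jump-tables is lowered to the IR function attribute
// "no-jump-tables"="true", which is exactly the string this predicate checks.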
8628 
8629 bool SelectionDAGBuilder::buildJumpTable(const CaseClusterVector &Clusters,
8630                                          unsigned First, unsigned Last,
8631                                          const SwitchInst *SI,
8632                                          MachineBasicBlock *DefaultMBB,
8633                                          CaseCluster &JTCluster) {
8634   assert(First <= Last);
8635 
8636   auto Prob = BranchProbability::getZero();
8637   unsigned NumCmps = 0;
8638   std::vector<MachineBasicBlock*> Table;
8639   DenseMap<MachineBasicBlock*, BranchProbability> JTProbs;
8640 
8641   // Initialize probabilities in JTProbs.
8642   for (unsigned I = First; I <= Last; ++I)
8643     JTProbs[Clusters[I].MBB] = BranchProbability::getZero();
8644 
8645   for (unsigned I = First; I <= Last; ++I) {
8646     assert(Clusters[I].Kind == CC_Range);
8647     Prob += Clusters[I].Prob;
8648     const APInt &Low = Clusters[I].Low->getValue();
8649     const APInt &High = Clusters[I].High->getValue();
8650     NumCmps += (Low == High) ? 1 : 2;
8651     if (I != First) {
8652       // Fill the gap between this and the previous cluster.
8653       const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
8654       assert(PreviousHigh.slt(Low));
8655       uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
8656       for (uint64_t J = 0; J < Gap; J++)
8657         Table.push_back(DefaultMBB);
8658     }
8659     uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
8660     for (uint64_t J = 0; J < ClusterSize; ++J)
8661       Table.push_back(Clusters[I].MBB);
8662     JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
8663   }
8664 
8665   unsigned NumDests = JTProbs.size();
8666   if (isSuitableForBitTests(NumDests, NumCmps,
8667                             Clusters[First].Low->getValue(),
8668                             Clusters[Last].High->getValue())) {
8669     // Clusters[First..Last] should be lowered as bit tests instead.
8670     return false;
8671   }
8672 
8673   // Create the MBB that will load from and jump through the table.
8674   // Note: We create it here, but it's not inserted into the function yet.
8675   MachineFunction *CurMF = FuncInfo.MF;
8676   MachineBasicBlock *JumpTableMBB =
8677       CurMF->CreateMachineBasicBlock(SI->getParent());
8678 
8679   // Add successors. Note: use table order for determinism.
8680   SmallPtrSet<MachineBasicBlock *, 8> Done;
8681   for (MachineBasicBlock *Succ : Table) {
8682     if (Done.count(Succ))
8683       continue;
8684     addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
8685     Done.insert(Succ);
8686   }
8687   JumpTableMBB->normalizeSuccProbs();
8688 
8689   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8690   unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI.getJumpTableEncoding())
8691                      ->createJumpTableIndex(Table);
8692 
8693   // Set up the jump table info.
8694   JumpTable JT(-1U, JTI, JumpTableMBB, nullptr);
8695   JumpTableHeader JTH(Clusters[First].Low->getValue(),
8696                       Clusters[Last].High->getValue(), SI->getCondition(),
8697                       nullptr, false);
8698   JTCases.emplace_back(std::move(JTH), std::move(JT));
8699 
8700   JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
8701                                      JTCases.size() - 1, Prob);
8702   return true;
8703 }
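// Worked example (illustrative): clusters for case values {1, 2} and {7}
// yield a seven-entry table indexed by (value - 1); slots 0 and 1 hold the
// case blocks, the gap-filling loop above fills slots 2..5 (values 3..6)
// with DefaultMBB, and slot 6 holds the block for 7.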
8704 
8705 void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
8706                                          const SwitchInst *SI,
8707                                          MachineBasicBlock *DefaultMBB) {
8708 #ifndef NDEBUG
8709   // Clusters must be non-empty, sorted, and only contain Range clusters.
8710   assert(!Clusters.empty());
8711   for (CaseCluster &C : Clusters)
8712     assert(C.Kind == CC_Range);
8713   for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
8714     assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
8715 #endif
8716 
8717   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8718   if (!areJTsAllowed(TLI, SI))
8719     return;
8720 
8721   const bool OptForSize = DefaultMBB->getParent()->getFunction()->optForSize();
8722 
8723   const int64_t N = Clusters.size();
8724   const unsigned MinJumpTableEntries = TLI.getMinimumJumpTableEntries();
8725   const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;
8726   const unsigned MaxJumpTableSize =
8727                    OptForSize || TLI.getMaximumJumpTableSize() == 0
8728                    ? UINT_MAX : TLI.getMaximumJumpTableSize();
8729 
8730   if (N < 2 || N < MinJumpTableEntries)
8731     return;
8732 
8733   // TotalCases[i]: Total number of cases in Clusters[0..i].
8734   SmallVector<unsigned, 8> TotalCases(N);
8735   for (unsigned i = 0; i < N; ++i) {
8736     const APInt &Hi = Clusters[i].High->getValue();
8737     const APInt &Lo = Clusters[i].Low->getValue();
8738     TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
8739     if (i != 0)
8740       TotalCases[i] += TotalCases[i - 1];
8741   }
8742 
8743   const unsigned MinDensity =
8744     OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
8745 
8746   // Cheap case: the whole range may be suitable for a jump table.
8747   unsigned JumpTableSize = (Clusters[N - 1].High->getValue() -
8748                             Clusters[0].Low->getValue())
8749                            .getLimitedValue(UINT_MAX - 1) + 1;
8750   if (JumpTableSize <= MaxJumpTableSize &&
8751       isDense(Clusters, TotalCases, 0, N - 1, MinDensity)) {
8752     CaseCluster JTCluster;
8753     if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
8754       Clusters[0] = JTCluster;
8755       Clusters.resize(1);
8756       return;
8757     }
8758   }
8759 
8760   // The algorithm below is not suitable for -O0.
8761   if (TM.getOptLevel() == CodeGenOpt::None)
8762     return;
8763 
8764   // Split Clusters into minimum number of dense partitions. The algorithm uses
8765   // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
8766   // for the Case Statement'" (1994), but builds the MinPartitions array in
8767   // reverse order to make it easier to reconstruct the partitions in ascending
8768   // order. In the choice between two optimal partitionings, it picks the one
8769   // which yields more jump tables.
8770 
8771   // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
8772   SmallVector<unsigned, 8> MinPartitions(N);
8773   // LastElement[i] is the last element of the partition starting at i.
8774   SmallVector<unsigned, 8> LastElement(N);
8775   // PartitionsScore[i] is used to break ties when choosing between two
8776   // partitionings resulting in the same number of partitions.
8777   SmallVector<unsigned, 8> PartitionsScore(N);
8778   // For PartitionsScore, a small number of comparisons is considered as good as
8779   // a jump table and a single comparison is considered better than a jump
8780   // table.
8781   enum PartitionScores : unsigned {
8782     NoTable = 0,
8783     Table = 1,
8784     FewCases = 1,
8785     SingleCase = 2
8786   };
8787 
8788   // Base case: There is only one way to partition Clusters[N-1].
8789   MinPartitions[N - 1] = 1;
8790   LastElement[N - 1] = N - 1;
8791   PartitionsScore[N - 1] = PartitionScores::SingleCase;
8792 
8793   // Note: loop indexes are signed to avoid underflow.
8794   for (int64_t i = N - 2; i >= 0; i--) {
8795     // Find optimal partitioning of Clusters[i..N-1].
8796     // Baseline: Put Clusters[i] into a partition on its own.
8797     MinPartitions[i] = MinPartitions[i + 1] + 1;
8798     LastElement[i] = i;
8799     PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;
8800 
8801     // Search for a solution that results in fewer partitions.
8802     for (int64_t j = N - 1; j > i; j--) {
8803       // Try building a partition from Clusters[i..j].
8804       JumpTableSize = (Clusters[j].High->getValue() -
8805                        Clusters[i].Low->getValue())
8806                       .getLimitedValue(UINT_MAX - 1) + 1;
8807       if (JumpTableSize <= MaxJumpTableSize &&
8808           isDense(Clusters, TotalCases, i, j, MinDensity)) {
8809         unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
8810         unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
8811         int64_t NumEntries = j - i + 1;
8812 
8813         if (NumEntries == 1)
8814           Score += PartitionScores::SingleCase;
8815         else if (NumEntries <= SmallNumberOfEntries)
8816           Score += PartitionScores::FewCases;
8817         else if (NumEntries >= MinJumpTableEntries)
8818           Score += PartitionScores::Table;
8819 
8820         // If this leads to fewer partitions, or to the same number of
8821         // partitions with better score, it is a better partitioning.
8822         if (NumPartitions < MinPartitions[i] ||
8823             (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
8824           MinPartitions[i] = NumPartitions;
8825           LastElement[i] = j;
8826           PartitionsScore[i] = Score;
8827         }
8828       }
8829     }
8830   }
8831 
8832   // Iterate over the partitions, replacing some with jump tables in-place.
8833   unsigned DstIndex = 0;
8834   for (unsigned First = 0, Last; First < N; First = Last + 1) {
8835     Last = LastElement[First];
8836     assert(Last >= First);
8837     assert(DstIndex <= First);
8838     unsigned NumClusters = Last - First + 1;
8839 
8840     CaseCluster JTCluster;
8841     if (NumClusters >= MinJumpTableEntries &&
8842         buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
8843       Clusters[DstIndex++] = JTCluster;
8844     } else {
8845       for (unsigned I = First; I <= Last; ++I)
8846         std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
8847     }
8848   }
8849   Clusters.resize(DstIndex);
8850 }
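// Worked example (illustrative, assuming MinJumpTableEntries == 4): for
// clusters at {0, 1, 2, 3, 100}, the whole range 0..100 is too sparse, but
// the partitioning {0..3 | 100} found above turns the first partition into a
// jump table while the lone 100 remains an ordinary range cluster.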
8851 
8852 bool SelectionDAGBuilder::rangeFitsInWord(const APInt &Low, const APInt &High) {
8853   // FIXME: Using the pointer type doesn't seem ideal.
8854   uint64_t BW = DAG.getDataLayout().getPointerSizeInBits();
8855   uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
8856   return Range <= BW;
8857 }
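// Example (illustrative, 64-bit pointers): Low == 10 and High == 73 give
// Range == 64, which fits; Low == 10 and High == 74 give Range == 65, which
// does not.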
8858 
8859 bool SelectionDAGBuilder::isSuitableForBitTests(unsigned NumDests,
8860                                                 unsigned NumCmps,
8861                                                 const APInt &Low,
8862                                                 const APInt &High) {
8863   // FIXME: I don't think NumCmps is the correct metric: a single case and a
8864   // range of cases both require only one branch to lower. Just looking at the
8865   // number of clusters and destinations should be enough to decide whether to
8866   // build bit tests.
8867 
8868   // To lower a range with bit tests, the range must fit the bitwidth of a
8869   // machine word.
8870   if (!rangeFitsInWord(Low, High))
8871     return false;
8872 
8873   // Decide whether it's profitable to lower this range with bit tests. Each
8874   // destination requires a bit test and branch, and there is an overall range
8875   // check branch. For a small number of clusters, separate comparisons might be
8876   // cheaper, and for many destinations, splitting the range might be better.
8877   return (NumDests == 1 && NumCmps >= 3) ||
8878          (NumDests == 2 && NumCmps >= 5) ||
8879          (NumDests == 3 && NumCmps >= 6);
8880 }
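// Example (illustrative): one destination reached through three comparisons
// (NumDests == 1, NumCmps == 3) qualifies, whereas two destinations needing
// only two comparisons (NumDests == 2, NumCmps == 2) do not.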
8881 
8882 bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
8883                                         unsigned First, unsigned Last,
8884                                         const SwitchInst *SI,
8885                                         CaseCluster &BTCluster) {
8886   assert(First <= Last);
8887   if (First == Last)
8888     return false;
8889 
8890   BitVector Dests(FuncInfo.MF->getNumBlockIDs());
8891   unsigned NumCmps = 0;
8892   for (int64_t I = First; I <= Last; ++I) {
8893     assert(Clusters[I].Kind == CC_Range);
8894     Dests.set(Clusters[I].MBB->getNumber());
8895     NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
8896   }
8897   unsigned NumDests = Dests.count();
8898 
8899   APInt Low = Clusters[First].Low->getValue();
8900   APInt High = Clusters[Last].High->getValue();
8901   assert(Low.slt(High));
8902 
8903   if (!isSuitableForBitTests(NumDests, NumCmps, Low, High))
8904     return false;
8905 
8906   APInt LowBound;
8907   APInt CmpRange;
8908 
8909   const int BitWidth = DAG.getTargetLoweringInfo()
8910                            .getPointerTy(DAG.getDataLayout())
8911                            .getSizeInBits();
8912   assert(rangeFitsInWord(Low, High) && "Case range must fit in bit mask!");
8913 
8914   // Check if the clusters cover a contiguous range such that no value in the
8915   // range will jump to the default statement.
8916   bool ContiguousRange = true;
8917   for (int64_t I = First + 1; I <= Last; ++I) {
8918     if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
8919       ContiguousRange = false;
8920       break;
8921     }
8922   }
8923 
8924   if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
8925     // Handle the case where all the case values fit in a word without having
8926     // to subtract minValue; in that case we can omit the subtraction entirely.
8927     LowBound = APInt::getNullValue(Low.getBitWidth());
8928     CmpRange = High;
8929     ContiguousRange = false;
8930   } else {
8931     LowBound = Low;
8932     CmpRange = High - Low;
8933   }
8934 
8935   CaseBitsVector CBV;
8936   auto TotalProb = BranchProbability::getZero();
8937   for (unsigned i = First; i <= Last; ++i) {
8938     // Find the CaseBits for this destination.
8939     unsigned j;
8940     for (j = 0; j < CBV.size(); ++j)
8941       if (CBV[j].BB == Clusters[i].MBB)
8942         break;
8943     if (j == CBV.size())
8944       CBV.push_back(
8945           CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
8946     CaseBits *CB = &CBV[j];
8947 
8948     // Update Mask, Bits and ExtraProb.
8949     uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
8950     uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
8951     assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
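    // The shift expression below builds a mask of (Hi - Lo + 1) ones starting
    // at bit Lo. For example (hypothetical values): a cluster covering
    // [LowBound + 2, LowBound + 4] gives Lo == 2 and Hi == 4, so
    // (-1ULL >> (63 - (4 - 2))) << 2 == 0b11100 sets bits 2, 3 and 4.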
8952     CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
8953     CB->Bits += Hi - Lo + 1;
8954     CB->ExtraProb += Clusters[i].Prob;
8955     TotalProb += Clusters[i].Prob;
8956   }
8957 
8958   BitTestInfo BTI;
8959   std::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) {
8960     // Sort by probability first, number of bits second.
8961     if (a.ExtraProb != b.ExtraProb)
8962       return a.ExtraProb > b.ExtraProb;
8963     return a.Bits > b.Bits;
8964   });
8965 
8966   for (auto &CB : CBV) {
8967     MachineBasicBlock *BitTestBB =
8968         FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
8969     BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
8970   }
8971   BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
8972                             SI->getCondition(), -1U, MVT::Other, false,
8973                             ContiguousRange, nullptr, nullptr, std::move(BTI),
8974                             TotalProb);
8975 
8976   BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
8977                                     BitTestCases.size() - 1, TotalProb);
8978   return true;
8979 }
8980 
8981 void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
8982                                               const SwitchInst *SI) {
8983   // Partition Clusters into as few subsets as possible, where each subset
8984   // has a range that fits in a machine word and has <= 3 unique destinations.
8985 
8986 #ifndef NDEBUG
8987   // Clusters must be sorted and contain Range or JumpTable clusters.
8988   assert(!Clusters.empty());
8989   assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
8990   for (const CaseCluster &C : Clusters)
8991     assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
8992   for (unsigned i = 1; i < Clusters.size(); ++i)
8993     assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
8994 #endif
8995 
8996   // The algorithm below is not suitable for -O0.
8997   if (TM.getOptLevel() == CodeGenOpt::None)
8998     return;
8999 
9000   // If the target lacks a legal shift left, do not emit bit tests at all.
9001   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9002   EVT PTy = TLI.getPointerTy(DAG.getDataLayout());
9003   if (!TLI.isOperationLegal(ISD::SHL, PTy))
9004     return;
9005 
9006   int BitWidth = PTy.getSizeInBits();
9007   const int64_t N = Clusters.size();
9008 
9009   // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
9010   SmallVector<unsigned, 8> MinPartitions(N);
9011   // LastElement[i] is the last element of the partition starting at i.
9012   SmallVector<unsigned, 8> LastElement(N);
9013 
9014   // FIXME: This might not be the best algorithm for finding bit test clusters.
9015 
9016   // Base case: There is only one way to partition Clusters[N-1].
9017   MinPartitions[N - 1] = 1;
9018   LastElement[N - 1] = N - 1;
9019 
9020   // Note: loop indexes are signed to avoid underflow.
9021   for (int64_t i = N - 2; i >= 0; --i) {
9022     // Find optimal partitioning of Clusters[i..N-1].
9023     // Baseline: Put Clusters[i] into a partition on its own.
9024     MinPartitions[i] = MinPartitions[i + 1] + 1;
9025     LastElement[i] = i;
9026 
9027     // Search for a solution that results in fewer partitions.
9028     // Note: the search is limited by BitWidth, reducing time complexity.
9029     for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
9030       // Try building a partition from Clusters[i..j].
9031 
9032       // Check the range.
9033       if (!rangeFitsInWord(Clusters[i].Low->getValue(),
9034                            Clusters[j].High->getValue()))
9035         continue;
9036 
9037       // Check the number of destinations and the cluster types.
9038       // FIXME: This works, but doesn't seem very efficient.
9039       bool RangesOnly = true;
9040       BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9041       for (int64_t k = i; k <= j; k++) {
9042         if (Clusters[k].Kind != CC_Range) {
9043           RangesOnly = false;
9044           break;
9045         }
9046         Dests.set(Clusters[k].MBB->getNumber());
9047       }
9048       if (!RangesOnly || Dests.count() > 3)
9049         break;
9050 
9051       // Check if it's a better partition.
9052       unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9053       if (NumPartitions < MinPartitions[i]) {
9054         // Found a better partition.
9055         MinPartitions[i] = NumPartitions;
9056         LastElement[i] = j;
9057       }
9058     }
9059   }
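  // Worked illustration (hypothetical input): for three single-value clusters
  // at 0, 2 and 1000 on a 64-bit target, only Clusters[0..1] fit in one word,
  // so the recurrence yields MinPartitions == {2, 2, 1} and
  // LastElement == {1, 1, 2}: Clusters[0..1] form one candidate partition and
  // Clusters[2] stands alone.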
9060 
9061   // Iterate over the partitions, replacing with bit-test clusters in-place.
9062   unsigned DstIndex = 0;
9063   for (unsigned First = 0, Last; First < N; First = Last + 1) {
9064     Last = LastElement[First];
9065     assert(First <= Last);
9066     assert(DstIndex <= First);
9067 
9068     CaseCluster BitTestCluster;
9069     if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
9070       Clusters[DstIndex++] = BitTestCluster;
9071     } else {
9072       size_t NumClusters = Last - First + 1;
9073       std::memmove(&Clusters[DstIndex], &Clusters[First],
9074                    sizeof(Clusters[0]) * NumClusters);
9075       DstIndex += NumClusters;
9076     }
9077   }
9078   Clusters.resize(DstIndex);
9079 }
9080 
9081 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
9082                                         MachineBasicBlock *SwitchMBB,
9083                                         MachineBasicBlock *DefaultMBB) {
9084   MachineFunction *CurMF = FuncInfo.MF;
9085   MachineBasicBlock *NextMBB = nullptr;
9086   MachineFunction::iterator BBI(W.MBB);
9087   if (++BBI != FuncInfo.MF->end())
9088     NextMBB = &*BBI;
9089 
9090   unsigned Size = W.LastCluster - W.FirstCluster + 1;
9091 
9092   BranchProbabilityInfo *BPI = FuncInfo.BPI;
9093 
9094   if (Size == 2 && W.MBB == SwitchMBB) {
9095     // If two of the cases have the same destination, and one case's value
9096     // differs from the other's in exactly one bit, use bit manipulation to do
9097     // both compares at once.  For example:
9098     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
9099     // TODO: This could be extended to merge any 2 cases in switches with 3
9100     // cases.
9101     // TODO: Handle cases where W.CaseBB != SwitchBB.
9102     CaseCluster &Small = *W.FirstCluster;
9103     CaseCluster &Big = *W.LastCluster;
9104 
9105     if (Small.Low == Small.High && Big.Low == Big.High &&
9106         Small.MBB == Big.MBB) {
9107       const APInt &SmallValue = Small.Low->getValue();
9108       const APInt &BigValue = Big.Low->getValue();
9109 
9110       // Check that exactly one bit differs between the two values.
9111       APInt CommonBit = BigValue ^ SmallValue;
9112       if (CommonBit.isPowerOf2()) {
9113         SDValue CondLHS = getValue(Cond);
9114         EVT VT = CondLHS.getValueType();
9115         SDLoc DL = getCurSDLoc();
9116 
9117         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
9118                                  DAG.getConstant(CommonBit, DL, VT));
9119         SDValue Cond = DAG.getSetCC(
9120             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
9121             ISD::SETEQ);
9122 
9123         // Update successor info.
9124         // Both Small and Big will jump to Small.BB, so we sum up the
9125         // probabilities.
9126         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
9127         if (BPI)
9128           addSuccessorWithProb(
9129               SwitchMBB, DefaultMBB,
9130               // The default destination is the first successor in IR.
9131               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
9132         else
9133           addSuccessorWithProb(SwitchMBB, DefaultMBB);
9134 
9135         // Insert the true branch.
9136         SDValue BrCond =
9137             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
9138                         DAG.getBasicBlock(Small.MBB));
9139         // Insert the false branch.
9140         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
9141                              DAG.getBasicBlock(DefaultMBB));
9142 
9143         DAG.setRoot(BrCond);
9144         return;
9145       }
9146     }
9147   }
9148 
9149   if (TM.getOptLevel() != CodeGenOpt::None) {
9150     // Order cases by probability so the most likely case will be checked first.
9151     std::sort(W.FirstCluster, W.LastCluster + 1,
9152               [](const CaseCluster &a, const CaseCluster &b) {
9153       return a.Prob > b.Prob;
9154     });
9155 
9156     // Rearrange the case blocks so that the last one falls through if possible
9157     // without changing the order of probabilities.
9158     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
9159       --I;
9160       if (I->Prob > W.LastCluster->Prob)
9161         break;
9162       if (I->Kind == CC_Range && I->MBB == NextMBB) {
9163         std::swap(*I, *W.LastCluster);
9164         break;
9165       }
9166     }
9167   }
9168 
9169   // Compute total probability.
9170   BranchProbability DefaultProb = W.DefaultProb;
9171   BranchProbability UnhandledProbs = DefaultProb;
9172   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
9173     UnhandledProbs += I->Prob;
9174 
9175   MachineBasicBlock *CurMBB = W.MBB;
9176   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
9177     MachineBasicBlock *Fallthrough;
9178     if (I == W.LastCluster) {
9179       // For the last cluster, fall through to the default destination.
9180       Fallthrough = DefaultMBB;
9181     } else {
9182       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
9183       CurMF->insert(BBI, Fallthrough);
9184       // Put Cond in a virtual register to make it available from the new blocks.
9185       ExportFromCurrentBlock(Cond);
9186     }
9187     UnhandledProbs -= I->Prob;
9188 
9189     switch (I->Kind) {
9190       case CC_JumpTable: {
9191         // FIXME: Optimize away range check based on pivot comparisons.
9192         JumpTableHeader *JTH = &JTCases[I->JTCasesIndex].first;
9193         JumpTable *JT = &JTCases[I->JTCasesIndex].second;
9194 
9195         // The jump block hasn't been inserted yet; insert it here.
9196         MachineBasicBlock *JumpMBB = JT->MBB;
9197         CurMF->insert(BBI, JumpMBB);
9198 
9199         auto JumpProb = I->Prob;
9200         auto FallthroughProb = UnhandledProbs;
9201 
9202         // If the default statement is a target of the jump table, we evenly
9203         // distribute the default probability to successors of CurMBB. Also
9204         // update the probability on the edge from JumpMBB to Fallthrough.
9205         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
9206                                               SE = JumpMBB->succ_end();
9207              SI != SE; ++SI) {
9208           if (*SI == DefaultMBB) {
9209             JumpProb += DefaultProb / 2;
9210             FallthroughProb -= DefaultProb / 2;
9211             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
9212             JumpMBB->normalizeSuccProbs();
9213             break;
9214           }
9215         }
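        // Numeric illustration (hypothetical): with DefaultProb == 20%, half
        // of it (10%) moves onto the CurMBB->JumpMBB edge and is taken off the
        // fallthrough edge, and the JumpMBB->DefaultMBB edge is set to the
        // remaining 10%.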
9216 
9217         addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
9218         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
9219         CurMBB->normalizeSuccProbs();
9220 
9221         // The jump table header will be inserted into our current block; it
9222         // will do the range check and fall through to our fallthrough block.
9223         JTH->HeaderBB = CurMBB;
9224         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
9225 
9226         // If we're in the right place, emit the jump table header right now.
9227         if (CurMBB == SwitchMBB) {
9228           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
9229           JTH->Emitted = true;
9230         }
9231         break;
9232       }
9233       case CC_BitTests: {
9234         // FIXME: Optimize away range check based on pivot comparisons.
9235         BitTestBlock *BTB = &BitTestCases[I->BTCasesIndex];
9236 
9237         // The bit test blocks haven't been inserted yet; insert them here.
9238         for (BitTestCase &BTC : BTB->Cases)
9239           CurMF->insert(BBI, BTC.ThisBB);
9240 
9241         // Fill in fields of the BitTestBlock.
9242         BTB->Parent = CurMBB;
9243         BTB->Default = Fallthrough;
9244 
9245         BTB->DefaultProb = UnhandledProbs;
9246         // If the cases in the bit test don't form a contiguous range, we
9247         // evenly distribute the probability on the edge to Fallthrough
9248         // between the two successors of CurMBB.
9249         if (!BTB->ContiguousRange) {
9250           BTB->Prob += DefaultProb / 2;
9251           BTB->DefaultProb -= DefaultProb / 2;
9252         }
9253 
9254         // If we're in the right place, emit the bit test header right now.
9255         if (CurMBB == SwitchMBB) {
9256           visitBitTestHeader(*BTB, SwitchMBB);
9257           BTB->Emitted = true;
9258         }
9259         break;
9260       }
9261       case CC_Range: {
9262         const Value *RHS, *LHS, *MHS;
9263         ISD::CondCode CC;
9264         if (I->Low == I->High) {
9265           // Check Cond == I->Low.
9266           CC = ISD::SETEQ;
9267           LHS = Cond;
9268           RHS = I->Low;
9269           MHS = nullptr;
9270         } else {
9271           // Check I->Low <= Cond <= I->High.
9272           CC = ISD::SETLE;
9273           LHS = I->Low;
9274           MHS = Cond;
9275           RHS = I->High;
9276         }
9277 
9278         // The false probability is the sum of all unhandled cases.
9279         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB, I->Prob,
9280                      UnhandledProbs);
9281 
9282         if (CurMBB == SwitchMBB)
9283           visitSwitchCase(CB, SwitchMBB);
9284         else
9285           SwitchCases.push_back(CB);
9286 
9287         break;
9288       }
9289     }
9290     CurMBB = Fallthrough;
9291   }
9292 }
9293 
9294 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
9295                                               CaseClusterIt First,
9296                                               CaseClusterIt Last) {
9297   return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
9298     if (X.Prob != CC.Prob)
9299       return X.Prob > CC.Prob;
9300 
9301     // Ties are broken by comparing the case value.
9302     return X.Low->getValue().slt(CC.Low->getValue());
9303   });
9304 }
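// For example (hypothetical): the cluster in [First, Last] with the strictly
// highest probability has rank 0; among clusters of equal probability, the
// smallest case value ranks first. A lower rank means the cluster is checked
// earlier once its partition is lowered.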
9305 
9306 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
9307                                         const SwitchWorkListItem &W,
9308                                         Value *Cond,
9309                                         MachineBasicBlock *SwitchMBB) {
9310   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
9311          "Clusters not sorted?");
9312 
9313   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
9314 
9315   // Balance the tree based on branch probabilities to create a near-optimal (in
9316   // terms of search time given key frequency) binary search tree. See e.g. Kurt
9317   // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
9318   CaseClusterIt LastLeft = W.FirstCluster;
9319   CaseClusterIt FirstRight = W.LastCluster;
9320   auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
9321   auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
9322 
9323   // Move LastLeft and FirstRight towards each other from opposite directions to
9324   // find a partitioning of the clusters which balances the probability on both
9325   // sides. If LeftProb and RightProb are equal, alternate which side is
9326   // taken to ensure 0-probability nodes are distributed evenly.
9327   unsigned I = 0;
9328   while (LastLeft + 1 < FirstRight) {
9329     if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
9330       LeftProb += (++LastLeft)->Prob;
9331     else
9332       RightProb += (--FirstRight)->Prob;
9333     I++;
9334   }
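  // Illustrative run (hypothetical probabilities): given four clusters at 10%,
  // 20%, 30% and 40% with a zero default probability, the left side grows
  // twice (10% -> 30% -> 60%) before the iterators meet, producing the split
  // {10%, 20%, 30%} | {40%}.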
9335 
9336   for (;;) {
9337     // Our binary search tree differs from a typical BST in that ours can have up
9338     // to three values in each leaf. The pivot selection above doesn't take that
9339     // into account, which means the tree might require more nodes and be less
9340     // efficient. We compensate for this here.
9341 
9342     unsigned NumLeft = LastLeft - W.FirstCluster + 1;
9343     unsigned NumRight = W.LastCluster - FirstRight + 1;
9344 
9345     if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
9346       // If one side has fewer than 3 clusters, and the other has more than 3,
9347       // consider taking a cluster from the other side.
9348 
9349       if (NumLeft < NumRight) {
9350         // Consider moving the first cluster on the right to the left side.
9351         CaseCluster &CC = *FirstRight;
9352         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
9353         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
9354         if (LeftSideRank <= RightSideRank) {
9355           // Moving the cluster to the left does not demote it.
9356           ++LastLeft;
9357           ++FirstRight;
9358           continue;
9359         }
9360       } else {
9361         assert(NumRight < NumLeft);
9362         // Consider moving the last element on the left to the right side.
9363         CaseCluster &CC = *LastLeft;
9364         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
9365         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
9366         if (RightSideRank <= LeftSideRank) {
9367           // Moving the cluster to the right does not demote it.
9368           --LastLeft;
9369           --FirstRight;
9370           continue;
9371         }
9372       }
9373     }
9374     break;
9375   }
9376 
9377   assert(LastLeft + 1 == FirstRight);
9378   assert(LastLeft >= W.FirstCluster);
9379   assert(FirstRight <= W.LastCluster);
9380 
9381   // Use the first element on the right as pivot since we will make less-than
9382   // comparisons against it.
9383   CaseClusterIt PivotCluster = FirstRight;
9384   assert(PivotCluster > W.FirstCluster);
9385   assert(PivotCluster <= W.LastCluster);
9386 
9387   CaseClusterIt FirstLeft = W.FirstCluster;
9388   CaseClusterIt LastRight = W.LastCluster;
9389 
9390   const ConstantInt *Pivot = PivotCluster->Low;
9391 
9392   // New blocks will be inserted immediately after the current one.
9393   MachineFunction::iterator BBI(W.MBB);
9394   ++BBI;
9395 
9396   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
9397   // we can branch to its destination directly if it's squeezed exactly in
9398   // between the known lower bound and Pivot - 1.
9399   MachineBasicBlock *LeftMBB;
9400   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
9401       FirstLeft->Low == W.GE &&
9402       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
9403     LeftMBB = FirstLeft->MBB;
9404   } else {
9405     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
9406     FuncInfo.MF->insert(BBI, LeftMBB);
9407     WorkList.push_back(
9408         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
9409     // Put Cond in a virtual register to make it available from the new blocks.
9410     ExportFromCurrentBlock(Cond);
9411   }
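  // For instance (hypothetical): if the left-hand side is the lone range
  // cluster [0, 9], W.GE is 0 and the pivot is 10, every value below the pivot
  // must land in that cluster, so LeftMBB is simply its destination block.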
9412 
9413   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
9414   // single cluster, RHS.Low == Pivot, and we can branch to its destination
9415   // directly if RHS.High equals the current upper bound.
9416   MachineBasicBlock *RightMBB;
9417   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
9418       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
9419     RightMBB = FirstRight->MBB;
9420   } else {
9421     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
9422     FuncInfo.MF->insert(BBI, RightMBB);
9423     WorkList.push_back(
9424         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
9425     // Put Cond in a virtual register to make it available from the new blocks.
9426     ExportFromCurrentBlock(Cond);
9427   }
9428 
9429   // Create the CaseBlock record that will be used to lower the branch.
9430   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
9431                LeftProb, RightProb);
9432 
9433   if (W.MBB == SwitchMBB)
9434     visitSwitchCase(CB, SwitchMBB);
9435   else
9436     SwitchCases.push_back(CB);
9437 }
9438 
9439 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
9440   // Extract cases from the switch.
9441   BranchProbabilityInfo *BPI = FuncInfo.BPI;
9442   CaseClusterVector Clusters;
9443   Clusters.reserve(SI.getNumCases());
9444   for (auto I : SI.cases()) {
9445     MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
9446     const ConstantInt *CaseVal = I.getCaseValue();
9447     BranchProbability Prob =
9448         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
9449             : BranchProbability(1, SI.getNumCases() + 1);
9450     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
9451   }
9452 
9453   MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
9454 
9455   // Cluster adjacent cases with the same destination. We do this at all
9456   // optimization levels because it's cheap to do and will make codegen faster
9457   // if there are many clusters.
9458   sortAndRangeify(Clusters);
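  // For example (hypothetical): adjacent cases 1, 2 and 3 that all branch to
  // the same block become a single [1, 3] range cluster.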
9459 
9460   if (TM.getOptLevel() != CodeGenOpt::None) {
9461     // Replace an unreachable default with the most popular destination.
9462     // FIXME: Exploit unreachable default more aggressively.
9463     bool UnreachableDefault =
9464         isa<UnreachableInst>(SI.getDefaultDest()->getFirstNonPHIOrDbg());
9465     if (UnreachableDefault && !Clusters.empty()) {
9466       DenseMap<const BasicBlock *, unsigned> Popularity;
9467       unsigned MaxPop = 0;
9468       const BasicBlock *MaxBB = nullptr;
9469       for (auto I : SI.cases()) {
9470         const BasicBlock *BB = I.getCaseSuccessor();
9471         if (++Popularity[BB] > MaxPop) {
9472           MaxPop = Popularity[BB];
9473           MaxBB = BB;
9474         }
9475       }
9476       // Set new default.
9477       assert(MaxPop > 0 && MaxBB);
9478       DefaultMBB = FuncInfo.MBBMap[MaxBB];
9479 
9480       // Remove cases that were pointing to the destination that is now the
9481       // default.
9482       CaseClusterVector New;
9483       New.reserve(Clusters.size());
9484       for (CaseCluster &CC : Clusters) {
9485         if (CC.MBB != DefaultMBB)
9486           New.push_back(CC);
9487       }
9488       Clusters = std::move(New);
9489     }
9490   }
9491 
9492   // If there is only the default destination, jump there directly.
9493   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
9494   if (Clusters.empty()) {
9495     SwitchMBB->addSuccessor(DefaultMBB);
9496     if (DefaultMBB != NextBlock(SwitchMBB)) {
9497       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
9498                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
9499     }
9500     return;
9501   }
9502 
9503   findJumpTables(Clusters, &SI, DefaultMBB);
9504   findBitTestClusters(Clusters, &SI);
9505 
9506   DEBUG({
9507     dbgs() << "Case clusters: ";
9508     for (const CaseCluster &C : Clusters) {
9509       if (C.Kind == CC_JumpTable) dbgs() << "JT:";
9510       if (C.Kind == CC_BitTests) dbgs() << "BT:";
9511 
9512       C.Low->getValue().print(dbgs(), true);
9513       if (C.Low != C.High) {
9514         dbgs() << '-';
9515         C.High->getValue().print(dbgs(), true);
9516       }
9517       dbgs() << ' ';
9518     }
9519     dbgs() << '\n';
9520   });
9521 
9522   assert(!Clusters.empty());
9523   SwitchWorkList WorkList;
9524   CaseClusterIt First = Clusters.begin();
9525   CaseClusterIt Last = Clusters.end() - 1;
9526   auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
9527   WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
9528 
9529   while (!WorkList.empty()) {
9530     SwitchWorkListItem W = WorkList.back();
9531     WorkList.pop_back();
9532     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
9533 
9534     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
9535         !DefaultMBB->getParent()->getFunction()->optForMinSize()) {
9536       // For optimized builds, lower large range as a balanced binary tree.
9537       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
9538       continue;
9539     }
9540 
9541     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
9542   }
9543 }
9544