//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <numeric>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will void this "
             "optimization"));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8], [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

// Return the calling convention if the Value passed requires ABI mangling, as
// it is a parameter to a function or a return value from a function that is
// not an intrinsic.
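// For example, for
//   %r = call fastcc float @callee(float %x)
// this returns CallingConv::Fast, whereas it returns None for inline asm and
// for direct calls to intrinsics such as @llvm.fabs.f32.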
static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
  if (auto *R = dyn_cast<ReturnInst>(V))
    return R->getParent()->getParent()->getCallingConv();

  if (auto *CI = dyn_cast<CallInst>(V)) {
    const bool IsInlineAsm = CI->isInlineAsm();
    const bool IsIndirectFunctionCall =
        !IsInlineAsm && !CI->getCalledFunction();

    // It is possible that the call instruction is an inline asm statement or
    // an indirect function call, in which case the return value of
    // getCalledFunction() would be nullptr.
    const bool IsIntrinsicCall =
        !IsInlineAsm && !IsIndirectFunctionCall &&
        CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;

    if (!IsInlineAsm && !IsIntrinsicCall)
      return CI->getCallingConv();
  }

  return None;
}

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
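///
/// For example, on a little-endian target, two i32 parts holding the halves
/// of an i64 value are reassembled as (i64 (BUILD_PAIR Parts[0], Parts[1])).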
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                const SDValue *Parts, unsigned NumParts,
                                MVT PartVT, EVT ValueVT, const Value *V,
                                Optional<CallingConv::ID> CC = None,
                                Optional<ISD::NodeType> AssertOp = None) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  CC);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUNDs are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  llvm_unreachable("Unknown mismatch!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
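///
/// For example, a v4f32 value held in two v2f32 parts is reassembled with
/// (v4f32 (CONCAT_VECTORS Parts[0], Parts[1])), assuming the target's type
/// breakdown yields two v2f32 registers.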
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.hasValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
                         (IntermediateVT.isVector()
                              ? IntermediateVT.getVectorNumElements() * NumParts
                              : NumIntermediates));
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element types of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
      "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
     // Certain ABIs require that vectors are passed as integers. If the types
     // are the same size, this is an obvious bitcast.
     if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
       return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
     } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
       // Bitcast Val back to the original type and extract the corresponding
       // vector we want.
       unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
       EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
                                           ValueVT.getVectorElementType(), Elts);
       Val = DAG.getBitcast(WiderVecType, Val);
       return DAG.getNode(
           ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
           DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
     }

     diagnosePossiblyInvalidConstraint(
         *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
     return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
    Val = ValueVT.isFloatingPoint() ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                                    : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
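///
/// For example, an i64 value copied into two i32 parts is bisected with
/// (i32 (EXTRACT_ELEMENT Val, 0)) and (i32 (EXTRACT_ELEMENT Val, 1)); for a
/// non-power-of-2 part count, the trailing parts are first split off with a
/// right shift.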
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           Optional<CallingConv::ID> CallConv = None,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
      DAG.getShiftAmountConstant(RoundBits, ValueVT, DL, /*LegalTypes*/false));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

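// Try to widen Val to match a vector PartVT that has more elements of the
// same element type, e.g. <2 x float> -> <4 x float>, by padding with undef
// elements. Returns an empty SDValue if no such widening applies.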
static SDValue widenVectorToPartType(SelectionDAG &DAG,
                                     SDValue Val, const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  unsigned PartNumElts = PartVT.getVectorNumElements();
  unsigned ValueNumElts = ValueVT.getVectorNumElements();
  if (PartNumElts > ValueNumElts &&
      PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
    EVT ElementVT = PartVT.getVectorElementType();
    // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
    // undef elements.
    SmallVector<SDValue, 16> Ops;
    DAG.ExtractVectorElements(Val, Ops);
    SDValue EltUndef = DAG.getUNDEF(ElementVT);
    for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
      Ops.push_back(EltUndef);

    // FIXME: Use CONCAT for 2x -> 4x.
    return DAG.getBuildVector(PartVT, DL, Ops);
  }

  return SDValue();
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
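///
/// For example, a v8i16 value copied into two v4i16 parts is split with
/// EXTRACT_SUBVECTOR at element offsets 0 and 4, assuming the target's type
/// breakdown yields two v4i16 registers.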
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.hasValue();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                 ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      if (ValueVT.getVectorNumElements() == 1) {
        Val = DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
            DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
      } else {
        assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType =
            EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  unsigned IntermediateNumElts = IntermediateVT.isVector() ?
    IntermediateVT.getVectorNumElements() : 1;

  // Convert the vector to the appropriate type if necessary.
  unsigned DestVectorNoElts = NumIntermediates * IntermediateNumElts;

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
  MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
  if (ValueVT != BuiltVectorTy) {
    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
      Val = Widened;

    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  }

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                           DAG.getConstant(i * IntermediateNumElts, DL, IdxVT));
    } else {
      Ops[i] = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
          DAG.getConstant(i, DL, IdxVT));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
749     // legal parts.
750     assert(NumIntermediates != 0 && "division by zero");
751     assert(NumParts % NumIntermediates == 0 &&
752            "Must expand into a divisible number of parts!");
753     unsigned Factor = NumParts / NumIntermediates;
754     for (unsigned i = 0; i != NumIntermediates; ++i)
755       getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
756                      CallConv);
757   }
758 }
759 
760 RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
761                            EVT valuevt, Optional<CallingConv::ID> CC)
762     : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
763       RegCount(1, regs.size()), CallConv(CC) {}
764 
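// For example, an i64 value on a 32-bit target starting at virtual register
// Reg produces ValueVTs = {i64}, RegVTs = {i32}, RegCount = {2}, and
// Regs = {Reg, Reg + 1}.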
RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           Optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}

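// For example, if FunctionLoweringInfo records that the upper 24 bits of a
// virtual i32 register are zero, the CopyFromReg built below is wrapped in an
// (AssertZext i8) node; a register known to be entirely zero becomes a plain
// constant instead.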
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is zero. Express that explicitly, since doing so
        // makes it easier for later optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is, the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

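  // The flag word records the operand kind and the register count; the
  // helpers below fold in a matching-operand number or a register class where
  // applicable.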
  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() &&
           TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind_Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<unsigned, unsigned>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    unsigned RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               const TargetLibraryInfo *li) {
  AA = aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getTokenFactor(getCurSDLoc(), PendingLoads);
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                     PendingExports);
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
    // Propagate the fast-math-flags of this IR instruction to the DAG node that
    // maps to this instruction.
    // TODO: We could handle all flags (nsw, etc) here.
    // TODO: If an IR instruction maps to >1 node, only the final node will have
    //       flags set.
    if (SDNode *Node = getNodeForIRValue(&I)) {
      SDNodeFlags IncomingFlags;
      IncomingFlags.copyFMF(*FPMO);
      if (!Node->getFlags().isDefined())
        Node->setFlags(IncomingFlags);
      else
        Node->intersectFlagsWith(IncomingFlags);
    }
  }

  if (!I.isTerminator() && !HasTailCall &&
      !isStatepoint(&I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
                                                const DIExpression *Expr) {
  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    const DbgValueInst *DI = DDI.getDI();
    DIVariable *DanglingVariable = DI->getVariable();
    DIExpression *DanglingExpr = DI->getExpression();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
      return true;
    }
    return false;
  };

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;

    // If debug info is to be dropped, run it through final checks to see
    // whether it can be salvaged.
    for (auto &DDI : DDIV)
      if (isMatchingDbgValue(DDI))
        salvageUnresolvedDbgValue(DDI);

    DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
    return;

  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    const DbgValueInst *DI = DDI.getDI();
    assert(DI && "Ill-formed DanglingDebugInfo");
    DebugLoc dl = DDI.getdl();
    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
      // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
      // we couldn't resolve it directly when examining the DbgValue intrinsic
      // in the first place, we should not be more successful here). Unless we
      // have some test case that proves this to be correct, we should avoid
      // calling EmitFuncArgumentDbgValue here.
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
        LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
                          << DbgSDNodeOrder << "] for:\n  " << *DI << "\n");
        LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
        // Increase the SDNodeOrder for the DbgValue here to make sure it is
        // inserted after the definition of Val when emitting the instructions
        // after ISel. An alternative could be to teach
        // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
        LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
                   << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                   << ValSDNodeOrder << "\n");
        SDV = getDbgValue(Val, Variable, Expr, dl,
                          std::max(DbgSDNodeOrder, ValSDNodeOrder));
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      } else
        LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
1191     } else {
1192       LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1193       auto Undef =
1194           UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
1195       auto SDV =
1196           DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
1197       DAG.AddDbgValue(SDV, nullptr, false);
1198     }
1199   }
1200   DDIV.clear();
1201 }
1202 
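// For example, if the operand of a dbg.value was DCE'd but was computed as
// %x = add i64 %y, 1, salvaging can rewrite the location to reference %y with
// the expression (DW_OP_plus_uconst 1, DW_OP_stack_value) and then retry.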
void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
  Value *V = DDI.getDI()->getValue();
  DILocalVariable *Var = DDI.getDI()->getVariable();
  DIExpression *Expr = DDI.getDI()->getExpression();
  DebugLoc DL = DDI.getdl();
  DebugLoc InstDL = DDI.getDI()->getDebugLoc();
  unsigned SDOrder = DDI.getSDNodeOrder();

  // Currently we consider only dbg.value intrinsics -- we tell the salvager
  // that DW_OP_stack_value is desired.
  assert(isa<DbgValueInst>(DDI.getDI()));
  bool StackValue = true;

  // Can this Value be encoded without any further work?
  if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder))
    return;

  // Attempt to salvage back through as many instructions as possible. Bail if
  // a non-instruction is seen, such as a constant expression or global
  // variable. FIXME: Further work could recover those too.
  while (isa<Instruction>(V)) {
    Instruction &VAsInst = *cast<Instruction>(V);
    DIExpression *NewExpr = salvageDebugInfoImpl(VAsInst, Expr, StackValue);

    // If we cannot salvage any further, and haven't yet found a suitable debug
    // expression, bail out.
    if (!NewExpr)
      break;

    // New value and expr now represent this debuginfo.
    V = VAsInst.getOperand(0);
    Expr = NewExpr;

    // Some kind of simplification occurred: check whether the operand of the
    // salvaged debug expression can be encoded in this DAG.
    if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder)) {
      LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n  "
                        << *DDI.getDI() << "\nBy stripping back to:\n  " << *V);
      return;
    }
  }

  // This was the final opportunity to salvage this debug information, and it
  // couldn't be done. Place an undef DBG_VALUE at this location to terminate
  // any earlier variable location.
  auto Undef = UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
  auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
  DAG.AddDbgValue(SDV, nullptr, false);

1252   LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  " << DDI.getDI()
1253                     << "\n");
1254   LLVM_DEBUG(dbgs() << "  Last seen at:\n    " << *DDI.getDI()->getOperand(0)
1255                     << "\n");
1256 }
1257 
bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
                                           DIExpression *Expr, DebugLoc dl,
                                           DebugLoc InstDL, unsigned Order) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDDbgValue *SDV;
  if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
      isa<ConstantPointerNull>(V)) {
    SDV = DAG.getConstantDbgValue(Var, Expr, V, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, nullptr, false);
    return true;
  }

  // If the Value is a frame index, we can create a FrameIndex debug value
  // without relying on the DAG at all.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    auto SI = FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      auto SDV =
          DAG.getFrameIndexDbgValue(Var, Expr, SI->second,
                                    /*IsIndirect*/ false, dl, SDNodeOrder);
      // Do not attach the SDNodeDbgValue to an SDNode: this variable location
      // is still available even if the SDNode gets optimized out.
      DAG.AddDbgValue(SDV, nullptr, false);
      return true;
    }
  }

  // Do not use getValue() in here; we don't want to generate code at
  // this point if it hasn't been done yet.
  SDValue N = NodeMap[V];
  if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
    N = UnusedArgNodeMap[V];
  if (N.getNode()) {
    if (EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
      return true;
    SDV = getDbgValue(N, Var, Expr, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, N.getNode(), false);
    return true;
  }

1298   // Special rules apply for the first dbg.values of parameter variables in a
1299   // function. Identify them by the fact that they reference Argument Values,
1300   // that their variables are parameters, and that there is no inlined-at
1301   // location: we need to let them dangle until they get an SDNode.
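       // For example (illustrative): in "define void @f(i32 %a)", a dbg.value
       // of %a seen before %a has been lowered must dangle until the argument's
       // CopyFromReg node exists, so the DBG_VALUE can be attached to it.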
1302   bool IsParamOfFunc = isa<Argument>(V) && Var->isParameter() &&
1303                        !InstDL.getInlinedAt();
1304   if (!IsParamOfFunc) {
1305     // The value is not used in this block yet (or it would have an SDNode).
1306     // We still want the value to appear for the user if possible -- if it has
1307     // an associated VReg, we can refer to that instead.
1308     auto VMI = FuncInfo.ValueMap.find(V);
1309     if (VMI != FuncInfo.ValueMap.end()) {
1310       unsigned Reg = VMI->second;
1311       // If this is a PHI node, it may be split up into several MI PHI nodes
1312       // (in FunctionLoweringInfo::set).
1313       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1314                        V->getType(), None);
1315       if (RFV.occupiesMultipleRegs()) {
1316         unsigned Offset = 0;
1317         unsigned BitsToDescribe = 0;
1318         if (auto VarSize = Var->getSizeInBits())
1319           BitsToDescribe = *VarSize;
1320         if (auto Fragment = Expr->getFragmentInfo())
1321           BitsToDescribe = Fragment->SizeInBits;
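             // For example (illustrative): an i64 variable living in two
             // 32-bit registers is described by one fragment per register:
             //   DBG_VALUE %r0, !"v", !DIExpression(DW_OP_LLVM_fragment, 0, 32)
             //   DBG_VALUE %r1, !"v", !DIExpression(DW_OP_LLVM_fragment, 32, 32)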
1322         for (auto RegAndSize : RFV.getRegsAndSizes()) {
1323           unsigned RegisterSize = RegAndSize.second;
1324           // Bail out if all bits are described already.
1325           if (Offset >= BitsToDescribe)
1326             break;
1327           unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1328               ? BitsToDescribe - Offset
1329               : RegisterSize;
1330           auto FragmentExpr = DIExpression::createFragmentExpression(
1331               Expr, Offset, FragmentSize);
1332           if (!FragmentExpr)
1333               continue;
1334           SDV = DAG.getVRegDbgValue(Var, *FragmentExpr, RegAndSize.first,
1335                                     false, dl, SDNodeOrder);
1336           DAG.AddDbgValue(SDV, nullptr, false);
1337           Offset += RegisterSize;
1338         }
1339       } else {
1340         SDV = DAG.getVRegDbgValue(Var, Expr, Reg, false, dl, SDNodeOrder);
1341         DAG.AddDbgValue(SDV, nullptr, false);
1342       }
1343       return true;
1344     }
1345   }
1346 
1347   return false;
1348 }
1349 
1350 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1351   // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1352   for (auto &Pair : DanglingDebugInfoMap)
1353     for (auto &DDI : Pair.getSecond())
1354       salvageUnresolvedDbgValue(DDI);
1355   clearDanglingDebugInfo();
1356 }
1357 
1358 /// getCopyFromRegs - If there was a virtual register allocated for the value
1359 /// V, emit a CopyFromReg of the specified type Ty; otherwise return SDValue().
1360 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1361   DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
1362   SDValue Result;
1363 
1364   if (It != FuncInfo.ValueMap.end()) {
1365     unsigned InReg = It->second;
1366 
1367     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1368                      DAG.getDataLayout(), InReg, Ty,
1369                      None); // This is not an ABI copy.
1370     SDValue Chain = DAG.getEntryNode();
1371     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1372                                  V);
1373     resolveDanglingDebugInfo(V, Result);
1374   }
1375 
1376   return Result;
1377 }
1378 
1379 /// getValue - Return an SDValue for the given Value.
1380 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1381   // If we already have an SDValue for this value, use it. It's important
1382   // to do this first, so that we don't create a CopyFromReg if we already
1383   // have a regular SDValue.
1384   SDValue &N = NodeMap[V];
1385   if (N.getNode()) return N;
1386 
1387   // If there's a virtual register allocated and initialized for this
1388   // value, use it.
1389   if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1390     return copyFromReg;
1391 
1392   // Otherwise create a new SDValue and remember it.
1393   SDValue Val = getValueImpl(V);
1394   NodeMap[V] = Val;
1395   resolveDanglingDebugInfo(V, Val);
1396   return Val;
1397 }
1398 
1399 // Return true if an SDValue exists for the given Value.
1400 bool SelectionDAGBuilder::findValue(const Value *V) const {
1401   return (NodeMap.find(V) != NodeMap.end()) ||
1402     (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
1403 }
1404 
1405 /// getNonRegisterValue - Return an SDValue for the given Value, but
1406 /// don't look in FuncInfo.ValueMap for a virtual register.
1407 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1408   // If we already have an SDValue for this value, use it.
1409   SDValue &N = NodeMap[V];
1410   if (N.getNode()) {
1411     if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
1412       // Remove the debug location from the node as the node is about to be used
1413       // in a location which may differ from the original debug location.  This
1414       // is relevant to Constant and ConstantFP nodes because they can appear
1415       // as constant expressions inside PHI nodes.
1416       N->setDebugLoc(DebugLoc());
1417     }
1418     return N;
1419   }
1420 
1421   // Otherwise create a new SDValue and remember it.
1422   SDValue Val = getValueImpl(V);
1423   NodeMap[V] = Val;
1424   resolveDanglingDebugInfo(V, Val);
1425   return Val;
1426 }
1427 
1428 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1429 /// Create an SDValue for the given value.
1430 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1431   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1432 
1433   if (const Constant *C = dyn_cast<Constant>(V)) {
1434     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1435 
1436     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1437       return DAG.getConstant(*CI, getCurSDLoc(), VT);
1438 
1439     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1440       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1441 
1442     if (isa<ConstantPointerNull>(C)) {
1443       unsigned AS = V->getType()->getPointerAddressSpace();
1444       return DAG.getConstant(0, getCurSDLoc(),
1445                              TLI.getPointerTy(DAG.getDataLayout(), AS));
1446     }
1447 
1448     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1449       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1450 
1451     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1452       return DAG.getUNDEF(VT);
1453 
1454     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1455       visit(CE->getOpcode(), *CE);
1456       SDValue N1 = NodeMap[V];
1457       assert(N1.getNode() && "visit didn't populate the NodeMap!");
1458       return N1;
1459     }
1460 
1461     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1462       SmallVector<SDValue, 4> Constants;
1463       for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
1464            OI != OE; ++OI) {
1465         SDNode *Val = getValue(*OI).getNode();
1466         // If the operand is an empty aggregate, there are no values.
1467         if (!Val) continue;
1468         // Add each leaf value from the operand to the Constants list
1469         // to form a flattened list of all the values.
1470         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1471           Constants.push_back(SDValue(Val, i));
1472       }
1473 
1474       return DAG.getMergeValues(Constants, getCurSDLoc());
1475     }
1476 
1477     if (const ConstantDataSequential *CDS =
1478           dyn_cast<ConstantDataSequential>(C)) {
1479       SmallVector<SDValue, 4> Ops;
1480       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1481         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1482         // Add each leaf value from the element to the Ops list to form a
1483         // flattened list of all the values.
1484         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1485           Ops.push_back(SDValue(Val, i));
1486       }
1487 
1488       if (isa<ArrayType>(CDS->getType()))
1489         return DAG.getMergeValues(Ops, getCurSDLoc());
1490       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1491     }
1492 
1493     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1494       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1495              "Unknown struct or array constant!");
1496 
1497       SmallVector<EVT, 4> ValueVTs;
1498       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1499       unsigned NumElts = ValueVTs.size();
1500       if (NumElts == 0)
1501         return SDValue(); // empty struct
1502       SmallVector<SDValue, 4> Constants(NumElts);
1503       for (unsigned i = 0; i != NumElts; ++i) {
1504         EVT EltVT = ValueVTs[i];
1505         if (isa<UndefValue>(C))
1506           Constants[i] = DAG.getUNDEF(EltVT);
1507         else if (EltVT.isFloatingPoint())
1508           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1509         else
1510           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1511       }
1512 
1513       return DAG.getMergeValues(Constants, getCurSDLoc());
1514     }
1515 
1516     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1517       return DAG.getBlockAddress(BA, VT);
1518 
1519     VectorType *VecTy = cast<VectorType>(V->getType());
1520     unsigned NumElements = VecTy->getNumElements();
1521 
1522     // Now that we know the number and type of the elements, get that number of
1523     // elements into the Ops array based on what kind of constant it is.
1524     SmallVector<SDValue, 16> Ops;
1525     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1526       for (unsigned i = 0; i != NumElements; ++i)
1527         Ops.push_back(getValue(CV->getOperand(i)));
1528     } else {
1529       assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
1530       EVT EltVT =
1531           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1532 
1533       SDValue Op;
1534       if (EltVT.isFloatingPoint())
1535         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1536       else
1537         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1538       Ops.assign(NumElements, Op);
1539     }
1540 
1541     // Create a BUILD_VECTOR node.
1542     return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1543   }
1544 
1545   // If this is a static alloca, generate it as the frame index instead of
1546   // an address computation.
1547   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1548     DenseMap<const AllocaInst*, int>::iterator SI =
1549       FuncInfo.StaticAllocaMap.find(AI);
1550     if (SI != FuncInfo.StaticAllocaMap.end())
1551       return DAG.getFrameIndex(SI->second,
1552                                TLI.getFrameIndexTy(DAG.getDataLayout()));
1553   }
1554 
1555   // If this is an instruction which fast-isel has deferred, select it now.
1556   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1557     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1558 
1559     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1560                      Inst->getType(), getABIRegCopyCC(V));
1561     SDValue Chain = DAG.getEntryNode();
1562     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1563   }
1564 
1565   llvm_unreachable("Can't get register for value!");
1566 }
1567 
1568 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1569   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1570   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1571   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1572   bool IsSEH = isAsynchronousEHPersonality(Pers);
1573   bool IsWasmCXX = Pers == EHPersonality::Wasm_CXX;
1574   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1575   if (!IsSEH)
1576     CatchPadMBB->setIsEHScopeEntry();
1577   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1578   if (IsMSVCCXX || IsCoreCLR)
1579     CatchPadMBB->setIsEHFuncletEntry();
1580   // Wasm does not need catchpads anymore
1581   if (!IsWasmCXX)
1582     DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other,
1583                             getControlRoot()));
1584 }
1585 
1586 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1587   // Update machine-CFG edge.
1588   MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1589   FuncInfo.MBB->addSuccessor(TargetMBB);
1590 
1591   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1592   bool IsSEH = isAsynchronousEHPersonality(Pers);
1593   if (IsSEH) {
1594     // If this is not a fall-through branch or optimizations are switched off,
1595     // emit the branch.
1596     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1597         TM.getOptLevel() == CodeGenOpt::None)
1598       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1599                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1600     return;
1601   }
1602 
1603   // Figure out the funclet membership for the catchret's successor.
1604   // This will be used by the FuncletLayout pass to determine how to order the
1605   // BB's.
1606   // A 'catchret' returns to the outer scope's color.
1607   Value *ParentPad = I.getCatchSwitchParentPad();
1608   const BasicBlock *SuccessorColor;
1609   if (isa<ConstantTokenNone>(ParentPad))
1610     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1611   else
1612     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1613   assert(SuccessorColor && "No parent funclet for catchret!");
1614   MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1615   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1616 
1617   // Create the terminator node.
1618   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1619                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
1620                             DAG.getBasicBlock(SuccessorColorMBB));
1621   DAG.setRoot(Ret);
1622 }
1623 
1624 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1625   // Don't emit any special code for the cleanuppad instruction. It just marks
1626   // the start of an EH scope/funclet.
1627   FuncInfo.MBB->setIsEHScopeEntry();
1628   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1629   if (Pers != EHPersonality::Wasm_CXX) {
1630     FuncInfo.MBB->setIsEHFuncletEntry();
1631     FuncInfo.MBB->setIsCleanupFuncletEntry();
1632   }
1633 }
1634 
1635 // For wasm, there's always a single catch pad attached to a catchswitch, and
1636 // the control flow always stops at the single catch pad, as it does for a
1637 // cleanup pad. If the caught exception is not of a type the catch pad
1638 // handles, it is rethrown by a rethrow instruction.
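     // For example (illustrative): an invoke unwinding to a catchswitch with a
     // single handler %catch yields UnwindDests = { %catch }; unlike the
     // general case below, the catchswitch's own unwind destination is not
     // followed.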
1639 static void findWasmUnwindDestinations(
1640     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1641     BranchProbability Prob,
1642     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1643         &UnwindDests) {
1644   while (EHPadBB) {
1645     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1646     if (isa<CleanupPadInst>(Pad)) {
1647       // Stop on cleanup pads.
1648       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1649       UnwindDests.back().first->setIsEHScopeEntry();
1650       break;
1651     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1652       // Add the catchpad handlers to the possible destinations. We don't
1653       // continue to the unwind destination of the catchswitch for wasm.
1654       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1655         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1656         UnwindDests.back().first->setIsEHScopeEntry();
1657       }
1658       break;
1659     } else {
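           // Wasm EH pad blocks are expected to begin with either a cleanuppad
           // or a catchswitch, so nothing else should be reachable here.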
1660       continue;
1661     }
1662   }
1663 }
1664 
1665 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
1666 /// many places it could ultimately go. In the IR, we have a single unwind
1667 /// destination, but in the machine CFG, we enumerate all the possible blocks.
1668 /// This function skips over imaginary basic blocks that hold catchswitch
1669 /// instructions, and finds all the "real" machine
1670 /// basic block destinations. As those destinations may not be successors of
1671 /// EHPadBB, here we also calculate the edge probability to those destinations.
1672 /// The passed-in Prob is the edge probability to EHPadBB.
1673 static void findUnwindDestinations(
1674     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1675     BranchProbability Prob,
1676     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1677         &UnwindDests) {
1678   EHPersonality Personality =
1679     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1680   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1681   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1682   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1683   bool IsSEH = isAsynchronousEHPersonality(Personality);
1684 
1685   if (IsWasmCXX) {
1686     findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
1687     assert(UnwindDests.size() <= 1 &&
1688            "There should be at most one unwind destination for wasm");
1689     return;
1690   }
1691 
1692   while (EHPadBB) {
1693     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1694     BasicBlock *NewEHPadBB = nullptr;
1695     if (isa<LandingPadInst>(Pad)) {
1696       // Stop on landingpads. They are not funclets.
1697       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1698       break;
1699     } else if (isa<CleanupPadInst>(Pad)) {
1700       // Stop on cleanup pads. Cleanups are always funclet entries for all known
1701       // personalities.
1702       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1703       UnwindDests.back().first->setIsEHScopeEntry();
1704       UnwindDests.back().first->setIsEHFuncletEntry();
1705       break;
1706     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1707       // Add the catchpad handlers to the possible destinations.
1708       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1709         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1710         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1711         if (IsMSVCCXX || IsCoreCLR)
1712           UnwindDests.back().first->setIsEHFuncletEntry();
1713         if (!IsSEH)
1714           UnwindDests.back().first->setIsEHScopeEntry();
1715       }
1716       NewEHPadBB = CatchSwitch->getUnwindDest();
1717     } else {
1718       continue;
1719     }
1720 
1721     BranchProbabilityInfo *BPI = FuncInfo.BPI;
1722     if (BPI && NewEHPadBB)
1723       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1724     EHPadBB = NewEHPadBB;
1725   }
1726 }
1727 
1728 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1729   // Update successor info.
1730   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1731   auto UnwindDest = I.getUnwindDest();
1732   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1733   BranchProbability UnwindDestProb =
1734       (BPI && UnwindDest)
1735           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1736           : BranchProbability::getZero();
1737   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1738   for (auto &UnwindDest : UnwindDests) {
1739     UnwindDest.first->setIsEHPad();
1740     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1741   }
1742   FuncInfo.MBB->normalizeSuccProbs();
1743 
1744   // Create the terminator node.
1745   SDValue Ret =
1746       DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1747   DAG.setRoot(Ret);
1748 }
1749 
1750 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1751   report_fatal_error("visitCatchSwitch not yet implemented!");
1752 }
1753 
1754 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1755   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1756   auto &DL = DAG.getDataLayout();
1757   SDValue Chain = getControlRoot();
1758   SmallVector<ISD::OutputArg, 8> Outs;
1759   SmallVector<SDValue, 8> OutVals;
1760 
1761   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1762   // lower
1763   //
1764   //   %val = call <ty> @llvm.experimental.deoptimize()
1765   //   ret <ty> %val
1766   //
1767   // differently.
1768   if (I.getParent()->getTerminatingDeoptimizeCall()) {
1769     LowerDeoptimizingReturn();
1770     return;
1771   }
1772 
1773   if (!FuncInfo.CanLowerReturn) {
1774     unsigned DemoteReg = FuncInfo.DemoteRegister;
1775     const Function *F = I.getParent()->getParent();
1776 
1777     // Emit a store of the return value through the virtual register.
1778     // Leave Outs empty so that LowerReturn won't try to load return
1779     // registers the usual way.
1780     SmallVector<EVT, 1> PtrValueVTs;
1781     ComputeValueVTs(TLI, DL,
1782                     F->getReturnType()->getPointerTo(
1783                         DAG.getDataLayout().getAllocaAddrSpace()),
1784                     PtrValueVTs);
1785 
1786     SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1787                                         DemoteReg, PtrValueVTs[0]);
1788     SDValue RetOp = getValue(I.getOperand(0));
1789 
1790     SmallVector<EVT, 4> ValueVTs;
1791     SmallVector<uint64_t, 4> Offsets;
1792     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
1793     unsigned NumValues = ValueVTs.size();
1794 
1795     SmallVector<SDValue, 4> Chains(NumValues);
1796     for (unsigned i = 0; i != NumValues; ++i) {
1797       // An aggregate return value cannot wrap around the address space, so
1798       // offsets to its parts don't wrap either.
1799       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);
1800       Chains[i] = DAG.getStore(
1801           Chain, getCurSDLoc(), SDValue(RetOp.getNode(), RetOp.getResNo() + i),
1802           // FIXME: better loc info would be nice.
1803           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
1804     }
1805 
1806     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1807                         MVT::Other, Chains);
1808   } else if (I.getNumOperands() != 0) {
1809     SmallVector<EVT, 4> ValueVTs;
1810     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1811     unsigned NumValues = ValueVTs.size();
1812     if (NumValues) {
1813       SDValue RetOp = getValue(I.getOperand(0));
1814 
1815       const Function *F = I.getParent()->getParent();
1816 
1817       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1818       if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1819                                           Attribute::SExt))
1820         ExtendKind = ISD::SIGN_EXTEND;
1821       else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1822                                                Attribute::ZExt))
1823         ExtendKind = ISD::ZERO_EXTEND;
1824 
1825       LLVMContext &Context = F->getContext();
1826       bool RetInReg = F->getAttributes().hasAttribute(
1827           AttributeList::ReturnIndex, Attribute::InReg);
1828 
1829       for (unsigned j = 0; j != NumValues; ++j) {
1830         EVT VT = ValueVTs[j];
1831 
1832         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1833           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1834 
1835         CallingConv::ID CC = F->getCallingConv();
1836 
1837         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
1838         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
1839         SmallVector<SDValue, 4> Parts(NumParts);
1840         getCopyToParts(DAG, getCurSDLoc(),
1841                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1842                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
1843 
1844         // 'inreg' on function refers to return value
1845         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1846         if (RetInReg)
1847           Flags.setInReg();
1848 
1849         // Propagate extension type if any
1850         if (ExtendKind == ISD::SIGN_EXTEND)
1851           Flags.setSExt();
1852         else if (ExtendKind == ISD::ZERO_EXTEND)
1853           Flags.setZExt();
1854 
1855         for (unsigned i = 0; i < NumParts; ++i) {
1856           Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1857                                         VT, /*isfixed=*/true, 0, 0));
1858           OutVals.push_back(Parts[i]);
1859         }
1860       }
1861     }
1862   }
1863 
1864   // Push the swifterror virtual register in as the last element of Outs. This
1865   // ensures that the swifterror value will be returned in the swifterror
1866   // physical register.
1867   const Function *F = I.getParent()->getParent();
1868   if (TLI.supportSwiftError() &&
1869       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1870     assert(FuncInfo.SwiftErrorArg && "Need a swift error argument");
1871     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1872     Flags.setSwiftError();
1873     Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1874                                   EVT(TLI.getPointerTy(DL)) /*argvt*/,
1875                                   true /*isfixed*/, 1 /*origidx*/,
1876                                   0 /*partOffs*/));
1877     // Create SDNode for the swifterror virtual register.
1878     OutVals.push_back(
1879         DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVRegUseAt(
1880                             &I, FuncInfo.MBB, FuncInfo.SwiftErrorArg).first,
1881                         EVT(TLI.getPointerTy(DL))));
1882   }
1883 
1884   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
1885   CallingConv::ID CallConv =
1886     DAG.getMachineFunction().getFunction().getCallingConv();
1887   Chain = DAG.getTargetLoweringInfo().LowerReturn(
1888       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1889 
1890   // Verify that the target's LowerReturn behaved as expected.
1891   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1892          "LowerReturn didn't return a valid chain!");
1893 
1894   // Update the DAG with the new chain value resulting from return lowering.
1895   DAG.setRoot(Chain);
1896 }
1897 
1898 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1899 /// created for it, emit nodes to copy the value into the virtual
1900 /// registers.
1901 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1902   // Skip empty types
1903   if (V->getType()->isEmptyTy())
1904     return;
1905 
1906   DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1907   if (VMI != FuncInfo.ValueMap.end()) {
1908     assert(!V->use_empty() && "Unused value assigned virtual registers!");
1909     CopyValueToVirtualRegister(V, VMI->second);
1910   }
1911 }
1912 
1913 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1914 /// the current basic block, add it to ValueMap now so that we'll get a
1915 /// CopyTo/FromReg.
1916 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1917   // No need to export constants.
1918   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1919 
1920   // Already exported?
1921   if (FuncInfo.isExportedInst(V)) return;
1922 
1923   unsigned Reg = FuncInfo.InitializeRegForValue(V);
1924   CopyValueToVirtualRegister(V, Reg);
1925 }
1926 
1927 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1928                                                      const BasicBlock *FromBB) {
1929   // The operands of the setcc have to be in this block.  We don't know
1930   // how to export them from some other block.
1931   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1932     // Can export from current BB.
1933     if (VI->getParent() == FromBB)
1934       return true;
1935 
1936     // Is already exported, noop.
1937     return FuncInfo.isExportedInst(V);
1938   }
1939 
1940   // If this is an argument, we can export it if the BB is the entry block or
1941   // if it is already exported.
1942   if (isa<Argument>(V)) {
1943     if (FromBB == &FromBB->getParent()->getEntryBlock())
1944       return true;
1945 
1946     // Otherwise, can only export this if it is already exported.
1947     return FuncInfo.isExportedInst(V);
1948   }
1949 
1950   // Otherwise, constants can always be exported.
1951   return true;
1952 }
1953 
1954 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
1955 BranchProbability
1956 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
1957                                         const MachineBasicBlock *Dst) const {
1958   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1959   const BasicBlock *SrcBB = Src->getBasicBlock();
1960   const BasicBlock *DstBB = Dst->getBasicBlock();
1961   if (!BPI) {
1962     // If BPI is not available, set the default probability as 1 / N, where N is
1963     // the number of successors.
1964     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
1965     return BranchProbability(1, SuccSize);
1966   }
1967   return BPI->getEdgeProbability(SrcBB, DstBB);
1968 }
1969 
1970 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
1971                                                MachineBasicBlock *Dst,
1972                                                BranchProbability Prob) {
1973   if (!FuncInfo.BPI)
1974     Src->addSuccessorWithoutProb(Dst);
1975   else {
1976     if (Prob.isUnknown())
1977       Prob = getEdgeProbability(Src, Dst);
1978     Src->addSuccessor(Dst, Prob);
1979   }
1980 }
1981 
1982 static bool InBlock(const Value *V, const BasicBlock *BB) {
1983   if (const Instruction *I = dyn_cast<Instruction>(V))
1984     return I->getParent() == BB;
1985   return true;
1986 }
1987 
1988 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1989 /// This function emits a branch and is used at the leaves of an OR or an
1990 /// AND operator tree.
1991 void
1992 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
1993                                                   MachineBasicBlock *TBB,
1994                                                   MachineBasicBlock *FBB,
1995                                                   MachineBasicBlock *CurBB,
1996                                                   MachineBasicBlock *SwitchBB,
1997                                                   BranchProbability TProb,
1998                                                   BranchProbability FProb,
1999                                                   bool InvertCond) {
2000   const BasicBlock *BB = CurBB->getBasicBlock();
2001 
2002   // If the leaf of the tree is a comparison, merge the condition into
2003   // the caseblock.
2004   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2005     // The operands of the cmp have to be in this block.  We don't know
2006     // how to export them from some other block.  If this is the first block
2007     // of the sequence, no exporting is needed.
2008     if (CurBB == SwitchBB ||
2009         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2010          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2011       ISD::CondCode Condition;
2012       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2013         ICmpInst::Predicate Pred =
2014             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2015         Condition = getICmpCondCode(Pred);
2016       } else {
2017         const FCmpInst *FC = cast<FCmpInst>(Cond);
2018         FCmpInst::Predicate Pred =
2019             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2020         Condition = getFCmpCondCode(Pred);
2021         if (TM.Options.NoNaNsFPMath)
2022           Condition = getFCmpCodeWithoutNaN(Condition);
2023       }
2024 
2025       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2026                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2027       SwitchCases.push_back(CB);
2028       return;
2029     }
2030   }
2031 
2032   // Create a CaseBlock record representing this branch.
2033   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2034   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2035                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2036   SwitchCases.push_back(CB);
2037 }
2038 
2039 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2040                                                MachineBasicBlock *TBB,
2041                                                MachineBasicBlock *FBB,
2042                                                MachineBasicBlock *CurBB,
2043                                                MachineBasicBlock *SwitchBB,
2044                                                Instruction::BinaryOps Opc,
2045                                                BranchProbability TProb,
2046                                                BranchProbability FProb,
2047                                                bool InvertCond) {
2048   // Look through a single-use 'not': skip over it and remember to invert the
2049   // op and operands at the next level.
2050   Value *NotCond;
2051   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2052       InBlock(NotCond, CurBB->getBasicBlock())) {
2053     FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2054                          !InvertCond);
2055     return;
2056   }
2057 
2058   const Instruction *BOp = dyn_cast<Instruction>(Cond);
2059   // Compute the effective opcode for Cond, taking into account whether it needs
2060   // to be inverted, e.g.
2061   //   and (not (or A, B)), C
2062   // gets lowered as
2063   //   and (and (not A, not B), C)
2064   unsigned BOpc = 0;
2065   if (BOp) {
2066     BOpc = BOp->getOpcode();
2067     if (InvertCond) {
2068       if (BOpc == Instruction::And)
2069         BOpc = Instruction::Or;
2070       else if (BOpc == Instruction::Or)
2071         BOpc = Instruction::And;
2072     }
2073   }
2074 
2075   // If this node is not part of the or/and tree, emit it as a branch.
2076   if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
2077       BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
2078       BOp->getParent() != CurBB->getBasicBlock() ||
2079       !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
2080       !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
2081     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2082                                  TProb, FProb, InvertCond);
2083     return;
2084   }
2085 
2086   //  Create TmpBB after CurBB.
2087   MachineFunction::iterator BBI(CurBB);
2088   MachineFunction &MF = DAG.getMachineFunction();
2089   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2090   CurBB->getParent()->insert(++BBI, TmpBB);
2091 
2092   if (Opc == Instruction::Or) {
2093     // Codegen X | Y as:
2094     // BB1:
2095     //   jmp_if_X TBB
2096     //   jmp TmpBB
2097     // TmpBB:
2098     //   jmp_if_Y TBB
2099     //   jmp FBB
2100     //
2101 
2102     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2103     // The requirement is that
2104     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2105     //     = TrueProb for original BB.
2106     // Assuming the original probabilities are A and B, one choice is to set
2107     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2108     // A/(1+B) and 2B/(1+B). This choice assumes that
2109     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2110     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2111     // TmpBB, but the math is more complicated.
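         // A worked example (assuming the incoming probabilities are
         // normalized, A + B = 1): with A = B = 1/2, BB1 gets {1/4, 3/4} and
         // TmpBB gets {1/3, 2/3}, so the original TrueProb is recovered as
         //   1/4 + 3/4 * 1/3 = 1/2 = A.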
2112 
2113     auto NewTrueProb = TProb / 2;
2114     auto NewFalseProb = TProb / 2 + FProb;
2115     // Emit the LHS condition.
2116     FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
2117                          NewTrueProb, NewFalseProb, InvertCond);
2118 
2119     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2120     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2121     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2122     // Emit the RHS condition into TmpBB.
2123     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
2124                          Probs[0], Probs[1], InvertCond);
2125   } else {
2126     assert(Opc == Instruction::And && "Unknown merge op!");
2127     // Codegen X & Y as:
2128     // BB1:
2129     //   jmp_if_X TmpBB
2130     //   jmp FBB
2131     // TmpBB:
2132     //   jmp_if_Y TBB
2133     //   jmp FBB
2134     //
2135     //  This requires creation of TmpBB after CurBB.
2136 
2137     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2138     // The requirement is that
2139     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2140     //     = FalseProb for original BB.
2141     // Assuming the original probabilities are A and B, one choice is to set
2142     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2143     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2144     // TrueProb for BB1 * FalseProb for TmpBB.
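         // A worked example (assuming A + B = 1): with A = B = 1/2, BB1 gets
         // {3/4, 1/4} and TmpBB gets {2/3, 1/3}, so the original FalseProb is
         // recovered as
         //   1/4 + 3/4 * 1/3 = 1/2 = B.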
2145 
2146     auto NewTrueProb = TProb + FProb / 2;
2147     auto NewFalseProb = FProb / 2;
2148     // Emit the LHS condition.
2149     FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
2150                          NewTrueProb, NewFalseProb, InvertCond);
2151 
2152     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2153     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2154     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2155     // Emit the RHS condition into TmpBB.
2156     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
2157                          Probs[0], Probs[1], InvertCond);
2158   }
2159 }
2160 
2161 /// If the set of cases should be emitted as a series of branches, return true.
2162 /// If we should emit this as a bunch of and/or'd together conditions, return
2163 /// false.
2164 bool
2165 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2166   if (Cases.size() != 2) return true;
2167 
2168   // If this is two comparisons of the same values or'd or and'd together, they
2169   // will get folded into a single comparison, so don't emit two blocks.
2170   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2171        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2172       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2173        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2174     return false;
2175   }
2176 
2177   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2178   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2179   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2180       Cases[0].CC == Cases[1].CC &&
2181       isa<Constant>(Cases[0].CmpRHS) &&
2182       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2183     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2184       return false;
2185     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2186       return false;
2187   }
2188 
2189   return true;
2190 }
2191 
2192 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2193   MachineBasicBlock *BrMBB = FuncInfo.MBB;
2194 
2195   // Update machine-CFG edges.
2196   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2197 
2198   if (I.isUnconditional()) {
2199     // Update machine-CFG edges.
2200     BrMBB->addSuccessor(Succ0MBB);
2201 
2202     // If this is not a fall-through branch or optimizations are switched off,
2203     // emit the branch.
2204     if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
2205       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2206                               MVT::Other, getControlRoot(),
2207                               DAG.getBasicBlock(Succ0MBB)));
2208 
2209     return;
2210   }
2211 
2212   // If this condition is one of the special cases we handle, do special stuff
2213   // now.
2214   const Value *CondVal = I.getCondition();
2215   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2216 
2217   // If this is a series of conditions that are or'd or and'd together, emit
2218   // this as a sequence of branches instead of setcc's with and/or operations.
2219   // As long as jumps are not expensive, this should improve performance.
2220   // For example, instead of something like:
2221   //     cmp A, B
2222   //     C = seteq
2223   //     cmp D, E
2224   //     F = setle
2225   //     or C, F
2226   //     jnz foo
2227   // Emit:
2228   //     cmp A, B
2229   //     je foo
2230   //     cmp D, E
2231   //     jle foo
2232   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
2233     Instruction::BinaryOps Opcode = BOp->getOpcode();
2234     if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
2235         !I.getMetadata(LLVMContext::MD_unpredictable) &&
2236         (Opcode == Instruction::And || Opcode == Instruction::Or)) {
2237       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
2238                            Opcode,
2239                            getEdgeProbability(BrMBB, Succ0MBB),
2240                            getEdgeProbability(BrMBB, Succ1MBB),
2241                            /*InvertCond=*/false);
2242       // If the compares in later blocks need to use values not currently
2243       // exported from this block, export them now.  This block should always
2244       // be the first entry.
2245       assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2246 
2247       // Allow some cases to be rejected.
2248       if (ShouldEmitAsBranches(SwitchCases)) {
2249         for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
2250           ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
2251           ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
2252         }
2253 
2254         // Emit the branch for this block.
2255         visitSwitchCase(SwitchCases[0], BrMBB);
2256         SwitchCases.erase(SwitchCases.begin());
2257         return;
2258       }
2259 
2260       // Okay, we decided not to do this, remove any inserted MBB's and clear
2261       // SwitchCases.
2262       for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
2263         FuncInfo.MF->erase(SwitchCases[i].ThisBB);
2264 
2265       SwitchCases.clear();
2266     }
2267   }
2268 
2269   // Create a CaseBlock record representing this branch.
2270   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2271                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2272 
2273   // Use visitSwitchCase to actually insert the fast branch sequence for this
2274   // cond branch.
2275   visitSwitchCase(CB, BrMBB);
2276 }
2277 
2278 /// visitSwitchCase - Emits the necessary code to represent a single node in
2279 /// the binary search tree resulting from lowering a switch instruction.
2280 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2281                                           MachineBasicBlock *SwitchBB) {
2282   SDValue Cond;
2283   SDValue CondLHS = getValue(CB.CmpLHS);
2284   SDLoc dl = CB.DL;
2285 
2286   // Build the setcc now.
2287   if (!CB.CmpMHS) {
2288     // Fold "(X == true)" to X and "(X == false)" to !X to
2289     // handle common cases produced by branch lowering.
2290     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2291         CB.CC == ISD::SETEQ)
2292       Cond = CondLHS;
2293     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2294              CB.CC == ISD::SETEQ) {
2295       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2296       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2297     } else
2298       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
2299   } else {
2300     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2301 
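         // For example (illustrative): unless Low is already the minimum
         // value, the range [Low, High] is tested below as
         // (X - Low) ule (High - Low), so for [10, 20] this emits
         //   setule (sub %x, 10), 10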
2302     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2303     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2304 
2305     SDValue CmpOp = getValue(CB.CmpMHS);
2306     EVT VT = CmpOp.getValueType();
2307 
2308     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2309       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2310                           ISD::SETLE);
2311     } else {
2312       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2313                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2314       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2315                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2316     }
2317   }
2318 
2319   // Update successor info
2320   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2321   // TrueBB and FalseBB are always different unless the incoming IR is
2322   // degenerate. This only happens when running llc on weird IR.
2323   if (CB.TrueBB != CB.FalseBB)
2324     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2325   SwitchBB->normalizeSuccProbs();
2326 
2327   // If the lhs block is the next block, invert the condition so that we can
2328   // fall through to the lhs instead of the rhs block.
2329   if (CB.TrueBB == NextBlock(SwitchBB)) {
2330     std::swap(CB.TrueBB, CB.FalseBB);
2331     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2332     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2333   }
2334 
2335   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2336                                MVT::Other, getControlRoot(), Cond,
2337                                DAG.getBasicBlock(CB.TrueBB));
2338 
2339   // Insert the false branch. Do this even if it's a fall-through branch;
2340   // this makes it easier to do DAG optimizations which require inverting
2341   // the branch condition.
2342   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2343                        DAG.getBasicBlock(CB.FalseBB));
2344 
2345   DAG.setRoot(BrCond);
2346 }
2347 
2348 /// visitJumpTable - Emit the JumpTable node in the current MBB.
2349 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
2350   // Emit the code for the jump table
2351   assert(JT.Reg != -1U && "Should lower JT Header first!");
2352   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2353   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2354                                      JT.Reg, PTy);
2355   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2356   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2357                                     MVT::Other, Index.getValue(1),
2358                                     Table, Index);
2359   DAG.setRoot(BrJumpTable);
2360 }
2361 
2362 /// visitJumpTableHeader - This function emits the necessary code to produce an
2363 /// index into the JumpTable from the switch value.
2364 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
2365                                                JumpTableHeader &JTH,
2366                                                MachineBasicBlock *SwitchBB) {
2367   SDLoc dl = getCurSDLoc();
2368 
2369   // Subtract the lowest switch case value from the value being switched on,
2370   // and conditionally branch to the default MBB if the result is greater than
2371   // the difference between the smallest and largest cases.
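       // For example (illustrative): for a switch over cases 10..13 this emits
       //   %idx = sub %x, 10
       //   brcond (setugt %idx, 3), %default
       // and %idx (zero-extended or truncated to pointer width below) then
       // indexes the jump table.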
2372   SDValue SwitchOp = getValue(JTH.SValue);
2373   EVT VT = SwitchOp.getValueType();
2374   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2375                             DAG.getConstant(JTH.First, dl, VT));
2376 
2377   // The SDNode we just created, which holds the value being switched on minus
2378   // the smallest case value, needs to be copied to a virtual register so it
2379   // can be used as an index into the jump table in a subsequent basic block.
2380   // This value may be smaller or larger than the target's pointer type, and
2381   // therefore may require extension or truncation.
2382   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2383   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2384 
2385   unsigned JumpTableReg =
2386       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2387   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2388                                     JumpTableReg, SwitchOp);
2389   JT.Reg = JumpTableReg;
2390 
2391   // Emit the range check for the jump table, and branch to the default block
2392   // for the switch statement if the value being switched on exceeds the largest
2393   // case in the switch.
2394   SDValue CMP = DAG.getSetCC(
2395       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2396                                  Sub.getValueType()),
2397       Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2398 
2399   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2400                                MVT::Other, CopyTo, CMP,
2401                                DAG.getBasicBlock(JT.Default));
2402 
2403   // Avoid emitting unnecessary branches to the next block.
2404   if (JT.MBB != NextBlock(SwitchBB))
2405     BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2406                          DAG.getBasicBlock(JT.MBB));
2407 
2408   DAG.setRoot(BrCond);
2409 }
2410 
2411 /// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
2412 /// variable if one exists.
2413 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2414                                  SDValue &Chain) {
2415   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2416   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2417   MachineFunction &MF = DAG.getMachineFunction();
2418   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2419   MachineSDNode *Node =
2420       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2421   if (Global) {
2422     MachinePointerInfo MPInfo(Global);
2423     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2424                  MachineMemOperand::MODereferenceable;
2425     MachineMemOperand *MemRef = MF.getMachineMemOperand(
2426         MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlignment(PtrTy));
2427     DAG.setNodeMemRefs(Node, {MemRef});
2428   }
2429   return SDValue(Node, 0);
2430 }
2431 
2432 /// Codegen a new tail for a stack protector check ParentMBB which has had its
2433 /// tail spliced into a stack protector check success bb.
2434 ///
2435 /// For a high level explanation of how this fits into the stack protector
2436 /// generation see the comment on the declaration of class
2437 /// StackProtectorDescriptor.
2438 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2439                                                   MachineBasicBlock *ParentBB) {
2440 
2441   // First create the loads to the guard/stack slot for the comparison.
2442   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2443   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2444 
2445   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2446   int FI = MFI.getStackProtectorIndex();
2447 
2448   SDValue Guard;
2449   SDLoc dl = getCurSDLoc();
2450   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2451   const Module &M = *ParentBB->getParent()->getFunction().getParent();
2452   unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
2453 
2454   // Generate code to load the content of the guard slot.
2455   SDValue GuardVal = DAG.getLoad(
2456       PtrTy, dl, DAG.getEntryNode(), StackSlotPtr,
2457       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2458       MachineMemOperand::MOVolatile);
2459 
2460   if (TLI.useStackGuardXorFP())
2461     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2462 
2463   // Retrieve guard check function, nullptr if instrumentation is inlined.
2464   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2465     // The target provides a guard check function to validate the guard value.
2466     // Generate a call to that function with the content of the guard slot as
2467     // argument.
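         // For example (illustrative): on MSVC targets this is typically
         // "__security_check_cookie", which takes the loaded guard value as
         // its only argument.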
2468     FunctionType *FnTy = GuardCheckFn->getFunctionType();
2469     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2470 
2471     TargetLowering::ArgListTy Args;
2472     TargetLowering::ArgListEntry Entry;
2473     Entry.Node = GuardVal;
2474     Entry.Ty = FnTy->getParamType(0);
2475     if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
2476       Entry.IsInReg = true;
2477     Args.push_back(Entry);
2478 
2479     TargetLowering::CallLoweringInfo CLI(DAG);
2480     CLI.setDebugLoc(getCurSDLoc())
2481         .setChain(DAG.getEntryNode())
2482         .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2483                    getValue(GuardCheckFn), std::move(Args));
2484 
2485     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2486     DAG.setRoot(Result.second);
2487     return;
2488   }
2489 
2490   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2491   // Otherwise, emit a volatile load to retrieve the stack guard value.
2492   SDValue Chain = DAG.getEntryNode();
2493   if (TLI.useLoadStackGuardNode()) {
2494     Guard = getLoadStackGuard(DAG, dl, Chain);
2495   } else {
2496     const Value *IRGuard = TLI.getSDagStackGuard(M);
2497     SDValue GuardPtr = getValue(IRGuard);
2498 
2499     Guard =
2500         DAG.getLoad(PtrTy, dl, Chain, GuardPtr, MachinePointerInfo(IRGuard, 0),
2501                     Align, MachineMemOperand::MOVolatile);
2502   }
2503 
2504   // Perform the comparison via a subtract/getsetcc.
2505   EVT VT = Guard.getValueType();
2506   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, GuardVal);
2507 
2508   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2509                                                         *DAG.getContext(),
2510                                                         Sub.getValueType()),
2511                              Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
2512 
2513   // If the sub is not 0, then we know the guard and the stack slot are not
2514   // equal, so branch to the failure MBB.
2515   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2516                                MVT::Other, GuardVal.getOperand(0),
2517                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2518   // Otherwise branch to success MBB.
2519   SDValue Br = DAG.getNode(ISD::BR, dl,
2520                            MVT::Other, BrCond,
2521                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2522 
2523   DAG.setRoot(Br);
2524 }
2525 
2526 /// Codegen the failure basic block for a stack protector check.
2527 ///
2528 /// A failure stack protector machine basic block consists simply of a call to
2529 /// __stack_chk_fail().
2530 ///
2531 /// For a high level explanation of how this fits into the stack protector
2532 /// generation see the comment on the declaration of class
2533 /// StackProtectorDescriptor.
2534 void
2535 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2536   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2537   SDValue Chain =
2538       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2539                       None, false, getCurSDLoc(), false, false).second;
2540   DAG.setRoot(Chain);
2541 }
2542 
2543 /// visitBitTestHeader - This function emits the necessary code to produce a
2544 /// value suitable for "bit tests".
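///
/// The header subtracts the minimum case value from the switch value and
/// branches to the default destination when the result is out of range,
/// roughly:
///   if ((unsigned)(x - First) > Range) goto Default;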
2545 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2546                                              MachineBasicBlock *SwitchBB) {
2547   SDLoc dl = getCurSDLoc();
2548 
2549   // Subtract the minimum value
2550   SDValue SwitchOp = getValue(B.SValue);
2551   EVT VT = SwitchOp.getValueType();
2552   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2553                             DAG.getConstant(B.First, dl, VT));
2554 
2555   // Check range
2556   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2557   SDValue RangeCmp = DAG.getSetCC(
2558       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2559                                  Sub.getValueType()),
2560       Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
2561 
2562   // Determine the type of the test operands.
2563   bool UsePtrType = false;
2564   if (!TLI.isTypeLegal(VT))
2565     UsePtrType = true;
2566   else {
2567     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2568       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2569         // Switch table case ranges are encoded into a series of masks.
2570         // Just use the pointer type; it's guaranteed to fit.
2571         UsePtrType = true;
2572         break;
2573       }
2574   }
2575   if (UsePtrType) {
2576     VT = TLI.getPointerTy(DAG.getDataLayout());
2577     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2578   }
2579 
2580   B.RegVT = VT.getSimpleVT();
2581   B.Reg = FuncInfo.CreateReg(B.RegVT);
2582   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2583 
2584   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2585 
2586   addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2587   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2588   SwitchBB->normalizeSuccProbs();
2589 
2590   SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
2591                                 MVT::Other, CopyTo, RangeCmp,
2592                                 DAG.getBasicBlock(B.Default));
2593 
2594   // Avoid emitting unnecessary branches to the next block.
2595   if (MBB != NextBlock(SwitchBB))
2596     BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange,
2597                           DAG.getBasicBlock(MBB));
2598 
2599   DAG.setRoot(BrRange);
2600 }
2601 
2602 /// visitBitTestCase - This function produces one "bit test".
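///
/// For example, with cases {0, 2, 5} sharing one destination, the emitted test
/// is roughly:
///   if (((1 << (x - First)) & 0b100101) != 0) goto TargetBB;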
2603 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2604                                            MachineBasicBlock* NextMBB,
2605                                            BranchProbability BranchProbToNext,
2606                                            unsigned Reg,
2607                                            BitTestCase &B,
2608                                            MachineBasicBlock *SwitchBB) {
2609   SDLoc dl = getCurSDLoc();
2610   MVT VT = BB.RegVT;
2611   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2612   SDValue Cmp;
2613   unsigned PopCount = countPopulation(B.Mask);
2614   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2615   if (PopCount == 1) {
2616     // Testing for a single bit; just compare the shift count with what it
2617     // would need to be to shift a 1 bit in that position.
2618     Cmp = DAG.getSetCC(
2619         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2620         ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2621         ISD::SETEQ);
2622   } else if (PopCount == BB.Range) {
2623     // There is only one zero bit in the range, test for it directly.
2624     Cmp = DAG.getSetCC(
2625         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2626         ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2627         ISD::SETNE);
2628   } else {
2629     // Make the desired shift.
2630     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2631                                     DAG.getConstant(1, dl, VT), ShiftOp);
2632 
2633     // Emit bit tests and jumps
2634     SDValue AndOp = DAG.getNode(ISD::AND, dl,
2635                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2636     Cmp = DAG.getSetCC(
2637         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2638         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2639   }
2640 
2641   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2642   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2643   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2644   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2645   // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
2646   // one, as they are relative probabilities (and thus work more like weights),
2647   // so we need to normalize them to make them sum to one.
2648   SwitchBB->normalizeSuccProbs();
2649 
2650   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2651                               MVT::Other, getControlRoot(),
2652                               Cmp, DAG.getBasicBlock(B.TargetBB));
2653 
2654   // Avoid emitting unnecessary branches to the next block.
2655   if (NextMBB != NextBlock(SwitchBB))
2656     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2657                         DAG.getBasicBlock(NextMBB));
2658 
2659   DAG.setRoot(BrAnd);
2660 }
2661 
2662 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2663   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2664 
2665   // Retrieve successors. Look through artificial IR level blocks like
2666   // catchswitch for successors.
2667   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2668   const BasicBlock *EHPadBB = I.getSuccessor(1);
2669 
2670   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2671   // have to do anything here to lower funclet bundles.
2672   assert(!I.hasOperandBundlesOtherThan(
2673              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2674          "Cannot lower invokes with arbitrary operand bundles yet!");
2675 
2676   const Value *Callee(I.getCalledValue());
2677   const Function *Fn = dyn_cast<Function>(Callee);
2678   if (isa<InlineAsm>(Callee))
2679     visitInlineAsm(&I);
2680   else if (Fn && Fn->isIntrinsic()) {
2681     switch (Fn->getIntrinsicID()) {
2682     default:
2683       llvm_unreachable("Cannot invoke this intrinsic");
2684     case Intrinsic::donothing:
2685       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2686       break;
2687     case Intrinsic::experimental_patchpoint_void:
2688     case Intrinsic::experimental_patchpoint_i64:
2689       visitPatchpoint(&I, EHPadBB);
2690       break;
2691     case Intrinsic::experimental_gc_statepoint:
2692       LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
2693       break;
2694     }
2695   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2696     // Currently we do not lower any intrinsic calls with deopt operand bundles.
2697     // Eventually we will support lowering the @llvm.experimental.deoptimize
2698     // intrinsic, and right now there are no plans to support other intrinsics
2699     // with deopt state.
2700     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2701   } else {
2702     LowerCallTo(&I, getValue(Callee), false, EHPadBB);
2703   }
2704 
2705   // If the value of the invoke is used outside of its defining block, make it
2706   // available as a virtual register.
2707   // We already took care of the exported value for the statepoint
2708   // instruction during the call to LowerStatepoint.
2709   if (!isStatepoint(I)) {
2710     CopyToExportRegsIfNeeded(&I);
2711   }
2712 
2713   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2714   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2715   BranchProbability EHPadBBProb =
2716       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2717           : BranchProbability::getZero();
2718   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2719 
2720   // Update successor info.
2721   addSuccessorWithProb(InvokeMBB, Return);
2722   for (auto &UnwindDest : UnwindDests) {
2723     UnwindDest.first->setIsEHPad();
2724     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2725   }
2726   InvokeMBB->normalizeSuccProbs();
2727 
2728   // Drop into normal successor.
2729   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
2730                           DAG.getBasicBlock(Return)));
2731 }
2732 
2733 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
2734   MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
2735 
2736   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2737   // have to do anything here to lower funclet bundles.
2738   assert(!I.hasOperandBundlesOtherThan(
2739              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2740          "Cannot lower callbrs with arbitrary operand bundles yet!");
2741 
2742   assert(isa<InlineAsm>(I.getCalledValue()) &&
2743          "Only know how to handle inlineasm callbr");
2744   visitInlineAsm(&I);
2745 
2746   // Retrieve successors.
2747   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
2748 
2749   // Update successor info.
2750   addSuccessorWithProb(CallBrMBB, Return);
2751   for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
2752     MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)];
2753     addSuccessorWithProb(CallBrMBB, Target);
2754   }
2755   CallBrMBB->normalizeSuccProbs();
2756 
2757   // Drop into default successor.
2758   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2759                           MVT::Other, getControlRoot(),
2760                           DAG.getBasicBlock(Return)));
2761 }
2762 
2763 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2764   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2765 }
2766 
2767 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2768   assert(FuncInfo.MBB->isEHPad() &&
2769          "Call to landingpad not in landing pad!");
2770 
2771   // If there aren't registers to copy the values into (e.g., during SjLj
2772   // exceptions), then don't bother to create these DAG nodes.
2773   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2774   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2775   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2776       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2777     return;
2778 
2779   // If the landingpad's return type is a token type, we don't create DAG
2780   // nodes for its exception pointer and selector value. The extraction of an
2781   // exception pointer or selector value from a token-type landingpad is not
2782   // currently supported.
2783   if (LP.getType()->isTokenTy())
2784     return;
2785 
2786   SmallVector<EVT, 2> ValueVTs;
2787   SDLoc dl = getCurSDLoc();
2788   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2789   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2790 
2791   // Get the two live-in registers as SDValues. The physregs have already been
2792   // copied into virtual registers.
2793   SDValue Ops[2];
2794   if (FuncInfo.ExceptionPointerVirtReg) {
2795     Ops[0] = DAG.getZExtOrTrunc(
2796         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2797                            FuncInfo.ExceptionPointerVirtReg,
2798                            TLI.getPointerTy(DAG.getDataLayout())),
2799         dl, ValueVTs[0]);
2800   } else {
2801     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
2802   }
2803   Ops[1] = DAG.getZExtOrTrunc(
2804       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2805                          FuncInfo.ExceptionSelectorVirtReg,
2806                          TLI.getPointerTy(DAG.getDataLayout())),
2807       dl, ValueVTs[1]);
2808 
2809   // Merge into one.
2810   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2811                             DAG.getVTList(ValueVTs), Ops);
2812   setValue(&LP, Res);
2813 }
2814 
2815 void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
2816 #ifndef NDEBUG
2817   for (const CaseCluster &CC : Clusters)
2818     assert(CC.Low == CC.High && "Input clusters must be single-case");
2819 #endif
2820 
2821   llvm::sort(Clusters, [](const CaseCluster &a, const CaseCluster &b) {
2822     return a.Low->getValue().slt(b.Low->getValue());
2823   });
2824 
2825   // Merge adjacent clusters with the same destination.
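  // (For example, three single-case clusters 1, 2 and 3 that all branch to the
  // same block are merged into one cluster covering the range [1, 3].)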
2826   const unsigned N = Clusters.size();
2827   unsigned DstIndex = 0;
2828   for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
2829     CaseCluster &CC = Clusters[SrcIndex];
2830     const ConstantInt *CaseVal = CC.Low;
2831     MachineBasicBlock *Succ = CC.MBB;
2832 
2833     if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
2834         (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
2835       // If this case has the same successor and is a neighbour, merge it into
2836       // the previous cluster.
2837       Clusters[DstIndex - 1].High = CaseVal;
2838       Clusters[DstIndex - 1].Prob += CC.Prob;
2839     } else {
2840       std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
2841                    sizeof(Clusters[SrcIndex]));
2842     }
2843   }
2844   Clusters.resize(DstIndex);
2845 }
2846 
2847 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2848                                            MachineBasicBlock *Last) {
2849   // Update JTCases.
2850   for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2851     if (JTCases[i].first.HeaderBB == First)
2852       JTCases[i].first.HeaderBB = Last;
2853 
2854   // Update BitTestCases.
2855   for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2856     if (BitTestCases[i].Parent == First)
2857       BitTestCases[i].Parent = Last;
2858 }
2859 
2860 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2861   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2862 
2863   // Update machine-CFG edges with unique successors.
2864   SmallSet<BasicBlock*, 32> Done;
2865   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2866     BasicBlock *BB = I.getSuccessor(i);
2867     bool Inserted = Done.insert(BB).second;
2868     if (!Inserted)
2869       continue;
2870 
2871     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2872     addSuccessorWithProb(IndirectBrMBB, Succ);
2873   }
2874   IndirectBrMBB->normalizeSuccProbs();
2875 
2876   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2877                           MVT::Other, getControlRoot(),
2878                           getValue(I.getAddress())));
2879 }
2880 
2881 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2882   if (!DAG.getTarget().Options.TrapUnreachable)
2883     return;
2884 
2885   // We may be able to ignore an unreachable following a noreturn call.
2886   if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
2887     const BasicBlock &BB = *I.getParent();
2888     if (&I != &BB.front()) {
2889       BasicBlock::const_iterator PredI =
2890         std::prev(BasicBlock::const_iterator(&I));
2891       if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2892         if (Call->doesNotReturn())
2893           return;
2894       }
2895     }
2896   }
2897 
2898   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2899 }
2900 
2901 void SelectionDAGBuilder::visitFSub(const User &I) {
2902   // -0.0 - X --> fneg
2903   Type *Ty = I.getType();
2904   if (isa<Constant>(I.getOperand(0)) &&
2905       I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2906     SDValue Op2 = getValue(I.getOperand(1));
2907     setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2908                              Op2.getValueType(), Op2));
2909     return;
2910   }
2911 
2912   visitBinary(I, ISD::FSUB);
2913 }
2914 
2915 /// Checks if the given instruction performs a vector reduction, in which case
2916 /// we have the freedom to alter the elements in the result as long as the
2917 /// reduction of them stays unchanged.
2918 static bool isVectorReductionOp(const User *I) {
2919   const Instruction *Inst = dyn_cast<Instruction>(I);
2920   if (!Inst || !Inst->getType()->isVectorTy())
2921     return false;
2922 
2923   auto OpCode = Inst->getOpcode();
2924   switch (OpCode) {
2925   case Instruction::Add:
2926   case Instruction::Mul:
2927   case Instruction::And:
2928   case Instruction::Or:
2929   case Instruction::Xor:
2930     break;
2931   case Instruction::FAdd:
2932   case Instruction::FMul:
2933     if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2934       if (FPOp->getFastMathFlags().isFast())
2935         break;
2936     LLVM_FALLTHROUGH;
2937   default:
2938     return false;
2939   }
2940 
2941   unsigned ElemNum = Inst->getType()->getVectorNumElements();
2942   // Ensure the reduction size is a power of 2.
2943   if (!isPowerOf2_32(ElemNum))
2944     return false;
2945 
2946   unsigned ElemNumToReduce = ElemNum;
2947 
2948   // Do a DFS on the def-use chain from the given instruction. We only
2949   // allow four kinds of operations during the search until we reach the
2950   // instruction that extracts the first element from the vector:
2951   //
2952   //   1. The reduction operation of the same opcode as the given instruction.
2953   //
2954   //   2. PHI node.
2955   //
2956   //   3. ShuffleVector instruction together with a reduction operation that
2957   //      does a partial reduction.
2958   //
2959   //   4. ExtractElement that extracts the first element from the vector, and we
2960   //      stop searching the def-use chain here.
2961   //
2962   // 3 & 4 above perform a reduction on all elements of the vector. We push defs
2963   // from 1-3 to the stack to continue the DFS. The given instruction is not
2964   // a reduction operation if we meet any instruction other than those
2965   // listed above.
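  //
  // For example, a fast-math fadd reduction of <4 x float> %v has the shape:
  //   %rdx.shuf = shufflevector <4 x float> %v, <4 x float> undef,
  //                             <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  //   %bin.rdx = fadd fast <4 x float> %v, %rdx.shuf
  //   %rdx.shuf1 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
  //                              <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  //   %bin.rdx2 = fadd fast <4 x float> %bin.rdx, %rdx.shuf1
  //   %res = extractelement <4 x float> %bin.rdx2, i32 0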
2966 
2967   SmallVector<const User *, 16> UsersToVisit{Inst};
2968   SmallPtrSet<const User *, 16> Visited;
2969   bool ReduxExtracted = false;
2970 
2971   while (!UsersToVisit.empty()) {
2972     auto User = UsersToVisit.back();
2973     UsersToVisit.pop_back();
2974     if (!Visited.insert(User).second)
2975       continue;
2976 
2977     for (const auto &U : User->users()) {
2978       auto Inst = dyn_cast<Instruction>(U);
2979       if (!Inst)
2980         return false;
2981 
2982       if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
2983         if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2984           if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().isFast())
2985             return false;
2986         UsersToVisit.push_back(U);
2987       } else if (const ShuffleVectorInst *ShufInst =
2988                      dyn_cast<ShuffleVectorInst>(U)) {
2989         // Detect the following pattern: a ShuffleVector instruction together
2990         // with a reduction that does a partial reduction on the first and
2991         // second ElemNumToReduce / 2 elements, and stores the result in the
2992         // first ElemNumToReduce / 2 elements of another vector.
2993 
2994         unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
2995         if (ResultElements < ElemNum)
2996           return false;
2997 
2998         if (ElemNumToReduce == 1)
2999           return false;
3000         if (!isa<UndefValue>(U->getOperand(1)))
3001           return false;
3002         for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
3003           if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
3004             return false;
3005         for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
3006           if (ShufInst->getMaskValue(i) != -1)
3007             return false;
3008 
3009         // There is only one user of this ShuffleVector instruction, which
3010         // must be a reduction operation.
3011         if (!U->hasOneUse())
3012           return false;
3013 
3014         auto U2 = dyn_cast<Instruction>(*U->user_begin());
3015         if (!U2 || U2->getOpcode() != OpCode)
3016           return false;
3017 
3018         // Check operands of the reduction operation.
3019         if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
3020             (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
3021           UsersToVisit.push_back(U2);
3022           ElemNumToReduce /= 2;
3023         } else
3024           return false;
3025       } else if (isa<ExtractElementInst>(U)) {
3026         // At this moment we should have reduced all elements in the vector.
3027         if (ElemNumToReduce != 1)
3028           return false;
3029 
3030         const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
3031         if (!Val || !Val->isZero())
3032           return false;
3033 
3034         ReduxExtracted = true;
3035       } else
3036         return false;
3037     }
3038   }
3039   return ReduxExtracted;
3040 }
3041 
3042 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3043   SDNodeFlags Flags;
3044 
3045   SDValue Op = getValue(I.getOperand(0));
3046   SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3047                                     Op, Flags);
3048   setValue(&I, UnNodeValue);
3049 }
3050 
3051 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3052   SDNodeFlags Flags;
3053   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3054     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3055     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3056   }
3057   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
3058     Flags.setExact(ExactOp->isExact());
3059   }
3060   if (isVectorReductionOp(&I)) {
3061     Flags.setVectorReduction(true);
3062     LLVM_DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");
3063   }
3064 
3065   SDValue Op1 = getValue(I.getOperand(0));
3066   SDValue Op2 = getValue(I.getOperand(1));
3067   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3068                                      Op1, Op2, Flags);
3069   setValue(&I, BinNodeValue);
3070 }
3071 
3072 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3073   SDValue Op1 = getValue(I.getOperand(0));
3074   SDValue Op2 = getValue(I.getOperand(1));
3075 
3076   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3077       Op1.getValueType(), DAG.getDataLayout());
3078 
3079   // Coerce the shift amount to the right type if we can.
3080   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3081     unsigned ShiftSize = ShiftTy.getSizeInBits();
3082     unsigned Op2Size = Op2.getValueSizeInBits();
3083     SDLoc DL = getCurSDLoc();
3084 
3085     // If the operand is smaller than the shift count type, promote it.
3086     if (ShiftSize > Op2Size)
3087       Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
3088 
3089     // If the operand is larger than the shift count type but the shift
3090     // count type has enough bits to represent any shift value, truncate
3091     // it now. This is a common case and it exposes the truncate to
3092     // optimization early.
3093     else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
3094       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
3095     // Otherwise we'll need to temporarily settle for some other convenient
3096     // type.  Type legalization will make adjustments once the shiftee is split.
3097     else
3098       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
3099   }
3100 
3101   bool nuw = false;
3102   bool nsw = false;
3103   bool exact = false;
3104 
3105   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3106 
3107     if (const OverflowingBinaryOperator *OFBinOp =
3108             dyn_cast<const OverflowingBinaryOperator>(&I)) {
3109       nuw = OFBinOp->hasNoUnsignedWrap();
3110       nsw = OFBinOp->hasNoSignedWrap();
3111     }
3112     if (const PossiblyExactOperator *ExactOp =
3113             dyn_cast<const PossiblyExactOperator>(&I))
3114       exact = ExactOp->isExact();
3115   }
3116   SDNodeFlags Flags;
3117   Flags.setExact(exact);
3118   Flags.setNoSignedWrap(nsw);
3119   Flags.setNoUnsignedWrap(nuw);
3120   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3121                             Flags);
3122   setValue(&I, Res);
3123 }
3124 
3125 void SelectionDAGBuilder::visitSDiv(const User &I) {
3126   SDValue Op1 = getValue(I.getOperand(0));
3127   SDValue Op2 = getValue(I.getOperand(1));
3128 
3129   SDNodeFlags Flags;
3130   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3131                  cast<PossiblyExactOperator>(&I)->isExact());
3132   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3133                            Op2, Flags));
3134 }
3135 
3136 void SelectionDAGBuilder::visitICmp(const User &I) {
3137   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3138   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3139     predicate = IC->getPredicate();
3140   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3141     predicate = ICmpInst::Predicate(IC->getPredicate());
3142   SDValue Op1 = getValue(I.getOperand(0));
3143   SDValue Op2 = getValue(I.getOperand(1));
3144   ISD::CondCode Opcode = getICmpCondCode(predicate);
3145 
3146   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3147                                                         I.getType());
3148   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3149 }
3150 
3151 void SelectionDAGBuilder::visitFCmp(const User &I) {
3152   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3153   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3154     predicate = FC->getPredicate();
3155   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3156     predicate = FCmpInst::Predicate(FC->getPredicate());
3157   SDValue Op1 = getValue(I.getOperand(0));
3158   SDValue Op2 = getValue(I.getOperand(1));
3159 
3160   ISD::CondCode Condition = getFCmpCondCode(predicate);
3161   auto *FPMO = dyn_cast<FPMathOperator>(&I);
3162   if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
3163     Condition = getFCmpCodeWithoutNaN(Condition);
3164 
3165   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3166                                                         I.getType());
3167   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3168 }
3169 
3170 // Check if the condition of the select is used only by selects, e.g. it has
3171 // a single select user or several select users sharing the same condition.
3172 static bool hasOnlySelectUsers(const Value *Cond) {
3173   return llvm::all_of(Cond->users(), [](const Value *V) {
3174     return isa<SelectInst>(V);
3175   });
3176 }
3177 
3178 void SelectionDAGBuilder::visitSelect(const User &I) {
3179   SmallVector<EVT, 4> ValueVTs;
3180   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3181                   ValueVTs);
3182   unsigned NumValues = ValueVTs.size();
3183   if (NumValues == 0) return;
3184 
3185   SmallVector<SDValue, 4> Values(NumValues);
3186   SDValue Cond     = getValue(I.getOperand(0));
3187   SDValue LHSVal   = getValue(I.getOperand(1));
3188   SDValue RHSVal   = getValue(I.getOperand(2));
3189   auto BaseOps = {Cond};
3190   ISD::NodeType OpCode = Cond.getValueType().isVector() ?
3191     ISD::VSELECT : ISD::SELECT;
3192 
3193   // Min/max matching is only viable if all output VTs are the same.
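  // (For example, select (icmp slt %a, %b), %a, %b can be matched to
  // ISD::SMIN below.)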
3194   if (is_splat(ValueVTs)) {
3195     EVT VT = ValueVTs[0];
3196     LLVMContext &Ctx = *DAG.getContext();
3197     auto &TLI = DAG.getTargetLoweringInfo();
3198 
3199     // We care about the legality of the operation after it has been type
3200     // legalized.
3201     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal &&
3202            VT != TLI.getTypeToTransformTo(Ctx, VT))
3203       VT = TLI.getTypeToTransformTo(Ctx, VT);
3204 
3205     // If the vselect is legal, assume we want to leave this as a vector setcc +
3206     // vselect. Otherwise, if this is going to be scalarized, we want to see if
3207     // min/max is legal on the scalar type.
3208     bool UseScalarMinMax = VT.isVector() &&
3209       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3210 
3211     Value *LHS, *RHS;
3212     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3213     ISD::NodeType Opc = ISD::DELETED_NODE;
3214     switch (SPR.Flavor) {
3215     case SPF_UMAX:    Opc = ISD::UMAX; break;
3216     case SPF_UMIN:    Opc = ISD::UMIN; break;
3217     case SPF_SMAX:    Opc = ISD::SMAX; break;
3218     case SPF_SMIN:    Opc = ISD::SMIN; break;
3219     case SPF_FMINNUM:
3220       switch (SPR.NaNBehavior) {
3221       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3222       case SPNB_RETURNS_NAN:   Opc = ISD::FMINIMUM; break;
3223       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3224       case SPNB_RETURNS_ANY: {
3225         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
3226           Opc = ISD::FMINNUM;
3227         else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT))
3228           Opc = ISD::FMINIMUM;
3229         else if (UseScalarMinMax)
3230           Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
3231             ISD::FMINNUM : ISD::FMINIMUM;
3232         break;
3233       }
3234       }
3235       break;
3236     case SPF_FMAXNUM:
3237       switch (SPR.NaNBehavior) {
3238       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3239       case SPNB_RETURNS_NAN:   Opc = ISD::FMAXIMUM; break;
3240       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3241       case SPNB_RETURNS_ANY:
3242 
3243         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
3244           Opc = ISD::FMAXNUM;
3245         else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT))
3246           Opc = ISD::FMAXIMUM;
3247         else if (UseScalarMinMax)
3248           Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
3249             ISD::FMAXNUM : ISD::FMAXIMUM;
3250         break;
3251       }
3252       break;
3253     default: break;
3254     }
3255 
3256     if (Opc != ISD::DELETED_NODE &&
3257         (TLI.isOperationLegalOrCustom(Opc, VT) ||
3258          (UseScalarMinMax &&
3259           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3260         // If the underlying comparison instruction is used by any other
3261         // instruction, the consumed instructions won't be destroyed, so it is
3262         // not profitable to convert to a min/max.
3263         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3264       OpCode = Opc;
3265       LHSVal = getValue(LHS);
3266       RHSVal = getValue(RHS);
3267       BaseOps = {};
3268     }
3269   }
3270 
3271   for (unsigned i = 0; i != NumValues; ++i) {
3272     SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3273     Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3274     Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3275     Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
3276                             LHSVal.getNode()->getValueType(LHSVal.getResNo()+i),
3277                             Ops);
3278   }
3279 
3280   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3281                            DAG.getVTList(ValueVTs), Values));
3282 }
3283 
3284 void SelectionDAGBuilder::visitTrunc(const User &I) {
3285   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3286   SDValue N = getValue(I.getOperand(0));
3287   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3288                                                         I.getType());
3289   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3290 }
3291 
3292 void SelectionDAGBuilder::visitZExt(const User &I) {
3293   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3294   // ZExt also can't be a cast to bool for the same reason; nothing much to do.
3295   SDValue N = getValue(I.getOperand(0));
3296   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3297                                                         I.getType());
3298   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3299 }
3300 
3301 void SelectionDAGBuilder::visitSExt(const User &I) {
3302   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3303   // SExt also can't be a cast to bool for the same reason; nothing much to do.
3304   SDValue N = getValue(I.getOperand(0));
3305   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3306                                                         I.getType());
3307   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3308 }
3309 
3310 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3311   // FPTrunc is never a no-op cast, no need to check
3312   SDValue N = getValue(I.getOperand(0));
3313   SDLoc dl = getCurSDLoc();
3314   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3315   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3316   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3317                            DAG.getTargetConstant(
3318                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3319 }
3320 
3321 void SelectionDAGBuilder::visitFPExt(const User &I) {
3322   // FPExt is never a no-op cast, no need to check
3323   SDValue N = getValue(I.getOperand(0));
3324   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3325                                                         I.getType());
3326   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3327 }
3328 
3329 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3330   // FPToUI is never a no-op cast, no need to check
3331   SDValue N = getValue(I.getOperand(0));
3332   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3333                                                         I.getType());
3334   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3335 }
3336 
3337 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3338   // FPToSI is never a no-op cast, no need to check
3339   SDValue N = getValue(I.getOperand(0));
3340   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3341                                                         I.getType());
3342   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3343 }
3344 
3345 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3346   // UIToFP is never a no-op cast, no need to check
3347   SDValue N = getValue(I.getOperand(0));
3348   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3349                                                         I.getType());
3350   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3351 }
3352 
3353 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3354   // SIToFP is never a no-op cast, no need to check
3355   SDValue N = getValue(I.getOperand(0));
3356   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3357                                                         I.getType());
3358   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3359 }
3360 
3361 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3362   // What to do depends on the size of the integer and the size of the pointer.
3363   // We can either truncate, zero extend, or no-op, accordingly.
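  // (For example, a ptrtoint of a 32-bit pointer to i64 zero extends, while a
  // ptrtoint of a 64-bit pointer to i32 truncates.)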
3364   SDValue N = getValue(I.getOperand(0));
3365   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3366                                                         I.getType());
3367   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3368 }
3369 
3370 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3371   // What to do depends on the size of the integer and the size of the pointer.
3372   // We can either truncate, zero extend, or no-op, accordingly.
3373   SDValue N = getValue(I.getOperand(0));
3374   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3375                                                         I.getType());
3376   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3377 }
3378 
3379 void SelectionDAGBuilder::visitBitCast(const User &I) {
3380   SDValue N = getValue(I.getOperand(0));
3381   SDLoc dl = getCurSDLoc();
3382   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3383                                                         I.getType());
3384 
3385   // BitCast assures us that source and destination are the same size so this is
3386   // either a BITCAST or a no-op.
3387   if (DestVT != N.getValueType())
3388     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3389                              DestVT, N)); // convert types.
3390   // Check if the original LLVM IR operand was a ConstantInt, because getValue()
3391   // might fold any kind of constant expression to an integer constant and that
3392   // is not what we are looking for. Only recognize a bitcast of a genuine
3393   // constant integer as an opaque constant.
3394   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3395     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3396                                  /*isOpaque*/true));
3397   else
3398     setValue(&I, N);            // noop cast.
3399 }
3400 
3401 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3402   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3403   const Value *SV = I.getOperand(0);
3404   SDValue N = getValue(SV);
3405   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3406 
3407   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3408   unsigned DestAS = I.getType()->getPointerAddressSpace();
3409 
3410   if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3411     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3412 
3413   setValue(&I, N);
3414 }
3415 
3416 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3417   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3418   SDValue InVec = getValue(I.getOperand(0));
3419   SDValue InVal = getValue(I.getOperand(1));
3420   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3421                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3422   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3423                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3424                            InVec, InVal, InIdx));
3425 }
3426 
3427 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3428   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3429   SDValue InVec = getValue(I.getOperand(0));
3430   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3431                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3432   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3433                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3434                            InVec, InIdx));
3435 }
3436 
3437 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3438   SDValue Src1 = getValue(I.getOperand(0));
3439   SDValue Src2 = getValue(I.getOperand(1));
3440   SDLoc DL = getCurSDLoc();
3441 
3442   SmallVector<int, 8> Mask;
3443   ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
3444   unsigned MaskNumElts = Mask.size();
3445 
3446   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3447   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3448   EVT SrcVT = Src1.getValueType();
3449   unsigned SrcNumElts = SrcVT.getVectorNumElements();
3450 
3451   if (SrcNumElts == MaskNumElts) {
3452     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3453     return;
3454   }
3455 
3456   // Normalize the shuffle vector since mask and vector length don't match.
3457   if (SrcNumElts < MaskNumElts) {
3458     // Mask is longer than the source vectors. We can use CONCAT_VECTORS to
3459     // make the mask and vector lengths match.
3460 
3461     if (MaskNumElts % SrcNumElts == 0) {
3462       // Mask length is a multiple of the source vector length.
3463       // Check if the shuffle is some kind of concatenation of the input
3464       // vectors.
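      // (For example, shuffling two <2 x i32> sources with mask <0, 1, 2, 3>
      // is simply CONCAT_VECTORS(Src1, Src2).)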
3465       unsigned NumConcat = MaskNumElts / SrcNumElts;
3466       bool IsConcat = true;
3467       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3468       for (unsigned i = 0; i != MaskNumElts; ++i) {
3469         int Idx = Mask[i];
3470         if (Idx < 0)
3471           continue;
3472         // Ensure the indices in each SrcVT sized piece are sequential and that
3473         // the same source is used for the whole piece.
3474         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3475             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3476              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3477           IsConcat = false;
3478           break;
3479         }
3480         // Remember which source this index came from.
3481         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3482       }
3483 
3484       // The shuffle is concatenating multiple vectors together. Just emit
3485       // a CONCAT_VECTORS operation.
3486       if (IsConcat) {
3487         SmallVector<SDValue, 8> ConcatOps;
3488         for (auto Src : ConcatSrcs) {
3489           if (Src < 0)
3490             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3491           else if (Src == 0)
3492             ConcatOps.push_back(Src1);
3493           else
3494             ConcatOps.push_back(Src2);
3495         }
3496         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3497         return;
3498       }
3499     }
3500 
3501     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3502     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3503     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3504                                     PaddedMaskNumElts);
3505 
3506     // Pad both vectors with undefs to make them the same length as the mask.
3507     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3508 
3509     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3510     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3511     MOps1[0] = Src1;
3512     MOps2[0] = Src2;
3513 
3514     Src1 = Src1.isUndef()
3515                ? DAG.getUNDEF(PaddedVT)
3516                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3517     Src2 = Src2.isUndef()
3518                ? DAG.getUNDEF(PaddedVT)
3519                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3520 
3521     // Readjust mask for new input vector length.
3522     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3523     for (unsigned i = 0; i != MaskNumElts; ++i) {
3524       int Idx = Mask[i];
3525       if (Idx >= (int)SrcNumElts)
3526         Idx -= SrcNumElts - PaddedMaskNumElts;
3527       MappedOps[i] = Idx;
3528     }
3529 
3530     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3531 
3532     // If the concatenated vector was padded, extract a subvector with the
3533     // correct number of elements.
3534     if (MaskNumElts != PaddedMaskNumElts)
3535       Result = DAG.getNode(
3536           ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3537           DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
3538 
3539     setValue(&I, Result);
3540     return;
3541   }
3542 
3543   if (SrcNumElts > MaskNumElts) {
3544     // Analyze the access pattern of the vector to see if we can extract
3545     // two subvectors and do the shuffle.
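    // (For example, a mask taking elements <4, 5> of a single <8 x i32> source
    // can be lowered to one EXTRACT_SUBVECTOR at index 4.)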
3546     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3547     bool CanExtract = true;
3548     for (int Idx : Mask) {
3549       unsigned Input = 0;
3550       if (Idx < 0)
3551         continue;
3552 
3553       if (Idx >= (int)SrcNumElts) {
3554         Input = 1;
3555         Idx -= SrcNumElts;
3556       }
3557 
3558       // If all the indices come from the same MaskNumElts sized portion of
3559       // the sources we can use extract. Also make sure the extract wouldn't
3560       // extract past the end of the source.
3561       int NewStartIdx = alignDown(Idx, MaskNumElts);
3562       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3563           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3564         CanExtract = false;
3565       // Make sure we always update StartIdx as we use it to track if all
3566       // elements are undef.
3567       StartIdx[Input] = NewStartIdx;
3568     }
3569 
3570     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3571       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3572       return;
3573     }
3574     if (CanExtract) {
3575       // Extract appropriate subvector and generate a vector shuffle
3576       for (unsigned Input = 0; Input < 2; ++Input) {
3577         SDValue &Src = Input == 0 ? Src1 : Src2;
3578         if (StartIdx[Input] < 0)
3579           Src = DAG.getUNDEF(VT);
3580         else {
3581           Src = DAG.getNode(
3582               ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3583               DAG.getConstant(StartIdx[Input], DL,
3584                               TLI.getVectorIdxTy(DAG.getDataLayout())));
3585         }
3586       }
3587 
3588       // Calculate new mask.
3589       SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3590       for (int &Idx : MappedOps) {
3591         if (Idx >= (int)SrcNumElts)
3592           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3593         else if (Idx >= 0)
3594           Idx -= StartIdx[0];
3595       }
3596 
3597       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3598       return;
3599     }
3600   }
3601 
3602   // We can't use either concat vectors or extract subvectors, so fall back
3603   // to replacing the shuffle with a sequence of extractelement nodes and
3604   // a build vector.
3605   EVT EltVT = VT.getVectorElementType();
3606   EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
3607   SmallVector<SDValue,8> Ops;
3608   for (int Idx : Mask) {
3609     SDValue Res;
3610 
3611     if (Idx < 0) {
3612       Res = DAG.getUNDEF(EltVT);
3613     } else {
3614       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3615       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3616 
3617       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
3618                         EltVT, Src, DAG.getConstant(Idx, DL, IdxVT));
3619     }
3620 
3621     Ops.push_back(Res);
3622   }
3623 
3624   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3625 }
3626 
3627 void SelectionDAGBuilder::visitInsertValue(const User &I) {
3628   ArrayRef<unsigned> Indices;
3629   if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
3630     Indices = IV->getIndices();
3631   else
3632     Indices = cast<ConstantExpr>(&I)->getIndices();
3633 
3634   const Value *Op0 = I.getOperand(0);
3635   const Value *Op1 = I.getOperand(1);
3636   Type *AggTy = I.getType();
3637   Type *ValTy = Op1->getType();
3638   bool IntoUndef = isa<UndefValue>(Op0);
3639   bool FromUndef = isa<UndefValue>(Op1);
3640 
3641   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3642 
3643   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3644   SmallVector<EVT, 4> AggValueVTs;
3645   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3646   SmallVector<EVT, 4> ValValueVTs;
3647   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3648 
3649   unsigned NumAggValues = AggValueVTs.size();
3650   unsigned NumValValues = ValValueVTs.size();
3651   SmallVector<SDValue, 4> Values(NumAggValues);
3652 
3653   // Ignore an insertvalue that produces an empty object
3654   if (!NumAggValues) {
3655     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3656     return;
3657   }
3658 
3659   SDValue Agg = getValue(Op0);
3660   unsigned i = 0;
3661   // Copy the beginning value(s) from the original aggregate.
3662   for (; i != LinearIndex; ++i)
3663     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3664                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3665   // Copy values from the inserted value(s).
3666   if (NumValValues) {
3667     SDValue Val = getValue(Op1);
3668     for (; i != LinearIndex + NumValValues; ++i)
3669       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3670                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3671   }
3672   // Copy remaining value(s) from the original aggregate.
3673   for (; i != NumAggValues; ++i)
3674     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3675                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3676 
3677   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3678                            DAG.getVTList(AggValueVTs), Values));
3679 }
3680 
3681 void SelectionDAGBuilder::visitExtractValue(const User &I) {
3682   ArrayRef<unsigned> Indices;
3683   if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
3684     Indices = EV->getIndices();
3685   else
3686     Indices = cast<ConstantExpr>(&I)->getIndices();
3687 
3688   const Value *Op0 = I.getOperand(0);
3689   Type *AggTy = Op0->getType();
3690   Type *ValTy = I.getType();
3691   bool OutOfUndef = isa<UndefValue>(Op0);
3692 
3693   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3694 
3695   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3696   SmallVector<EVT, 4> ValValueVTs;
3697   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3698 
3699   unsigned NumValValues = ValValueVTs.size();
3700 
3701   // Ignore an extractvalue that produces an empty object
3702   if (!NumValValues) {
3703     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3704     return;
3705   }
3706 
3707   SmallVector<SDValue, 4> Values(NumValValues);
3708 
3709   SDValue Agg = getValue(Op0);
3710   // Copy out the selected value(s).
3711   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3712     Values[i - LinearIndex] =
3713       OutOfUndef ?
3714         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3715         SDValue(Agg.getNode(), Agg.getResNo() + i);
3716 
3717   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3718                            DAG.getVTList(ValValueVTs), Values));
3719 }
3720 
3721 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3722   Value *Op0 = I.getOperand(0);
3723   // Note that the pointer operand may be a vector of pointers. Take the scalar
3724   // element which holds a pointer.
3725   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3726   SDValue N = getValue(Op0);
3727   SDLoc dl = getCurSDLoc();
3728 
3729   // Normalize a vector GEP - all scalar operands should be converted to
3730   // splat vectors.
3731   unsigned VectorWidth = I.getType()->isVectorTy() ?
3732     cast<VectorType>(I.getType())->getVectorNumElements() : 0;
3733 
3734   if (VectorWidth && !N.getValueType().isVector()) {
3735     LLVMContext &Context = *DAG.getContext();
3736     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth);
3737     N = DAG.getSplatBuildVector(VT, dl, N);
3738   }
3739 
3740   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3741        GTI != E; ++GTI) {
3742     const Value *Idx = GTI.getOperand();
3743     if (StructType *StTy = GTI.getStructTypeOrNull()) {
3744       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3745       if (Field) {
3746         // N = N + Offset
3747         uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3748 
3749         // In an inbounds GEP with an offset that is nonnegative even when
3750         // interpreted as signed, assume there is no unsigned overflow.
3751         SDNodeFlags Flags;
3752         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3753           Flags.setNoUnsignedWrap(true);
3754 
3755         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3756                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3757       }
3758     } else {
3759       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
3760       MVT IdxTy = MVT::getIntegerVT(IdxSize);
3761       APInt ElementSize(IdxSize, DL->getTypeAllocSize(GTI.getIndexedType()));
3762 
3763       // If this is a scalar constant or a splat vector of constants,
3764       // handle it quickly.
3765       const auto *CI = dyn_cast<ConstantInt>(Idx);
3766       if (!CI && isa<ConstantDataVector>(Idx) &&
3767           cast<ConstantDataVector>(Idx)->getSplatValue())
3768         CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());
3769 
3770       if (CI) {
3771         if (CI->isZero())
3772           continue;
3773         APInt Offs = ElementSize * CI->getValue().sextOrTrunc(IdxSize);
3774         LLVMContext &Context = *DAG.getContext();
3775         SDValue OffsVal = VectorWidth ?
3776           DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorWidth)) :
3777           DAG.getConstant(Offs, dl, IdxTy);
3778 
3779         // In an inbounds GEP with an offset that is nonnegative even when
3780         // interpreted as signed, assume there is no unsigned overflow.
3781         SDNodeFlags Flags;
3782         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3783           Flags.setNoUnsignedWrap(true);
3784 
3785         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3786         continue;
3787       }
3788 
3789       // N = N + Idx * ElementSize;
3790       SDValue IdxN = getValue(Idx);
3791 
3792       if (!IdxN.getValueType().isVector() && VectorWidth) {
3793         EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(), VectorWidth);
3794         IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3795       }
3796 
3797       // If the index is smaller or larger than intptr_t, truncate or extend
3798       // it.
3799       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3800 
3801       // If this is a multiply by a power of two, turn it into a shl
3802       // immediately.  This is a very common case.
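      // (For example, indexing an array of i32 scales the index by 4, which
      // becomes IdxN << 2 here.)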
3803       if (ElementSize != 1) {
3804         if (ElementSize.isPowerOf2()) {
3805           unsigned Amt = ElementSize.logBase2();
3806           IdxN = DAG.getNode(ISD::SHL, dl,
3807                              N.getValueType(), IdxN,
3808                              DAG.getConstant(Amt, dl, IdxN.getValueType()));
3809         } else {
3810           SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType());
3811           IdxN = DAG.getNode(ISD::MUL, dl,
3812                              N.getValueType(), IdxN, Scale);
3813         }
3814       }
3815 
3816       N = DAG.getNode(ISD::ADD, dl,
3817                       N.getValueType(), N, IdxN);
3818     }
3819   }
3820 
3821   setValue(&I, N);
3822 }
3823 
3824 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3825   // If this is a fixed sized alloca in the entry block of the function,
3826   // allocate it statically on the stack.
3827   if (FuncInfo.StaticAllocaMap.count(&I))
3828     return;   // getValue will auto-populate this.
3829 
3830   SDLoc dl = getCurSDLoc();
3831   Type *Ty = I.getAllocatedType();
3832   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3833   auto &DL = DAG.getDataLayout();
3834   uint64_t TySize = DL.getTypeAllocSize(Ty);
3835   unsigned Align =
3836       std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
3837 
3838   SDValue AllocSize = getValue(I.getArraySize());
3839 
3840   EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
3841   if (AllocSize.getValueType() != IntPtr)
3842     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
3843 
3844   AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
3845                           AllocSize,
3846                           DAG.getConstant(TySize, dl, IntPtr));
3847 
3848   // Handle alignment.  If the requested alignment is less than or equal to
3849   // the stack alignment, ignore it.  If it is greater than the stack
3850   // alignment, we note this in the DYNAMIC_STACKALLOC node.
3851   unsigned StackAlign =
3852       DAG.getSubtarget().getFrameLowering()->getStackAlignment();
3853   if (Align <= StackAlign)
3854     Align = 0;
3855 
3856   // Round the size of the allocation up to the stack alignment by
3857   // adding StackAlign - 1 to the size. This doesn't overflow because
3858   // we're computing an address inside an alloca.
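       // (e.g. with StackAlign == 16, a 40-byte request becomes
       // (40 + 15) & ~15 == 48 after the mask below.)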
3859   SDNodeFlags Flags;
3860   Flags.setNoUnsignedWrap(true);
3861   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
3862                           DAG.getConstant(StackAlign - 1, dl, IntPtr), Flags);
3863 
3864   // Mask out the low bits for alignment purposes.
3865   AllocSize =
3866       DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
3867                   DAG.getConstant(~(uint64_t)(StackAlign - 1), dl, IntPtr));
3868 
3869   SDValue Ops[] = {getRoot(), AllocSize, DAG.getConstant(Align, dl, IntPtr)};
3870   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3871   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
3872   setValue(&I, DSA);
3873   DAG.setRoot(DSA.getValue(1));
3874 
3875   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3876 }
3877 
3878 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3879   if (I.isAtomic())
3880     return visitAtomicLoad(I);
3881 
3882   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3883   const Value *SV = I.getOperand(0);
3884   if (TLI.supportSwiftError()) {
3885     // Swifterror values can come from either a function parameter with
3886     // swifterror attribute or an alloca with swifterror attribute.
3887     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
3888       if (Arg->hasSwiftErrorAttr())
3889         return visitLoadFromSwiftError(I);
3890     }
3891 
3892     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3893       if (Alloca->isSwiftError())
3894         return visitLoadFromSwiftError(I);
3895     }
3896   }
3897 
3898   SDValue Ptr = getValue(SV);
3899 
3900   Type *Ty = I.getType();
3901 
3902   bool isVolatile = I.isVolatile();
3903   bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
3904   bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr;
3905   bool isDereferenceable = isDereferenceablePointer(SV, DAG.getDataLayout());
3906   unsigned Alignment = I.getAlignment();
3907 
3908   AAMDNodes AAInfo;
3909   I.getAAMetadata(AAInfo);
3910   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3911 
3912   SmallVector<EVT, 4> ValueVTs;
3913   SmallVector<uint64_t, 4> Offsets;
3914   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
3915   unsigned NumValues = ValueVTs.size();
3916   if (NumValues == 0)
3917     return;
3918 
3919   SDValue Root;
3920   bool ConstantMemory = false;
3921   if (isVolatile || NumValues > MaxParallelChains)
3922     // Serialize volatile loads with other side effects.
3923     Root = getRoot();
3924   else if (AA &&
3925            AA->pointsToConstantMemory(MemoryLocation(
3926                SV,
3927                LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
3928                AAInfo))) {
3929     // Do not serialize (non-volatile) loads of constant memory with anything.
3930     Root = DAG.getEntryNode();
3931     ConstantMemory = true;
3932   } else {
3933     // Do not serialize non-volatile loads against each other.
3934     Root = DAG.getRoot();
3935   }
3936 
3937   SDLoc dl = getCurSDLoc();
3938 
3939   if (isVolatile)
3940     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
3941 
3942   // An aggregate load cannot wrap around the address space, so offsets to its
3943   // parts don't wrap either.
3944   SDNodeFlags Flags;
3945   Flags.setNoUnsignedWrap(true);
3946 
3947   SmallVector<SDValue, 4> Values(NumValues);
3948   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3949   EVT PtrVT = Ptr.getValueType();
3950   unsigned ChainI = 0;
3951   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3952     // Serializing loads here may result in excessive register pressure, and
3953     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
3954     // could recover a bit by hoisting nodes upward in the chain by recognizing
3955     // they are side-effect free or do not alias. The optimizer should really
3956     // avoid this case by converting large object/array copies to llvm.memcpy
3957     // (MaxParallelChains should always remain as a failsafe).
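         // For example (a sketch), an aggregate with 100 scalar parts gets a
         // TokenFactor after every MaxParallelChains loads, threaded as the
         // chain of the next batch, instead of one 100-operand TokenFactor.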
3958     if (ChainI == MaxParallelChains) {
3959       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
3960       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3961                                   makeArrayRef(Chains.data(), ChainI));
3962       Root = Chain;
3963       ChainI = 0;
3964     }
3965     SDValue A = DAG.getNode(ISD::ADD, dl,
3966                             PtrVT, Ptr,
3967                             DAG.getConstant(Offsets[i], dl, PtrVT),
3968                             Flags);
3969     auto MMOFlags = MachineMemOperand::MONone;
3970     if (isVolatile)
3971       MMOFlags |= MachineMemOperand::MOVolatile;
3972     if (isNonTemporal)
3973       MMOFlags |= MachineMemOperand::MONonTemporal;
3974     if (isInvariant)
3975       MMOFlags |= MachineMemOperand::MOInvariant;
3976     if (isDereferenceable)
3977       MMOFlags |= MachineMemOperand::MODereferenceable;
3978     MMOFlags |= TLI.getMMOFlags(I);
3979 
3980     SDValue L = DAG.getLoad(ValueVTs[i], dl, Root, A,
3981                             MachinePointerInfo(SV, Offsets[i]), Alignment,
3982                             MMOFlags, AAInfo, Ranges);
3983 
3984     Values[i] = L;
3985     Chains[ChainI] = L.getValue(1);
3986   }
3987 
3988   if (!ConstantMemory) {
3989     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3990                                 makeArrayRef(Chains.data(), ChainI));
3991     if (isVolatile)
3992       DAG.setRoot(Chain);
3993     else
3994       PendingLoads.push_back(Chain);
3995   }
3996 
3997   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
3998                            DAG.getVTList(ValueVTs), Values));
3999 }
4000 
4001 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4002   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4003          "call visitStoreToSwiftError when backend supports swifterror");
4004 
4005   SmallVector<EVT, 4> ValueVTs;
4006   SmallVector<uint64_t, 4> Offsets;
4007   const Value *SrcV = I.getOperand(0);
4008   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4009                   SrcV->getType(), ValueVTs, &Offsets);
4010   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4011          "expect a single EVT for swifterror");
4012 
4013   SDValue Src = getValue(SrcV);
4014   // Create a virtual register, then update the virtual register.
4015   unsigned VReg; bool CreatedVReg;
4016   std::tie(VReg, CreatedVReg) = FuncInfo.getOrCreateSwiftErrorVRegDefAt(&I);
4017   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4018   // Chain can be getRoot or getControlRoot.
4019   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4020                                       SDValue(Src.getNode(), Src.getResNo()));
4021   DAG.setRoot(CopyNode);
4022   if (CreatedVReg)
4023     FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.getOperand(1), VReg);
4024 }
4025 
4026 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4027   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4028          "call visitLoadFromSwiftError when backend supports swifterror");
4029 
4030   assert(!I.isVolatile() &&
4031          I.getMetadata(LLVMContext::MD_nontemporal) == nullptr &&
4032          I.getMetadata(LLVMContext::MD_invariant_load) == nullptr &&
4033          "swifterror loads cannot be volatile, non-temporal, or invariant");
4034 
4035   const Value *SV = I.getOperand(0);
4036   Type *Ty = I.getType();
4037   AAMDNodes AAInfo;
4038   I.getAAMetadata(AAInfo);
4039   assert(
4040       (!AA ||
4041        !AA->pointsToConstantMemory(MemoryLocation(
4042            SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4043            AAInfo))) &&
4044       "load_from_swift_error should not be constant memory");
4045 
4046   SmallVector<EVT, 4> ValueVTs;
4047   SmallVector<uint64_t, 4> Offsets;
4048   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4049                   ValueVTs, &Offsets);
4050   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4051          "expect a single EVT for swifterror");
4052 
4053   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4054   SDValue L = DAG.getCopyFromReg(
4055       getRoot(), getCurSDLoc(),
4056       FuncInfo.getOrCreateSwiftErrorVRegUseAt(&I, FuncInfo.MBB, SV).first,
4057       ValueVTs[0]);
4058 
4059   setValue(&I, L);
4060 }
4061 
4062 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4063   if (I.isAtomic())
4064     return visitAtomicStore(I);
4065 
4066   const Value *SrcV = I.getOperand(0);
4067   const Value *PtrV = I.getOperand(1);
4068 
4069   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4070   if (TLI.supportSwiftError()) {
4071     // Swifterror values can come from either a function parameter with
4072     // swifterror attribute or an alloca with swifterror attribute.
4073     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4074       if (Arg->hasSwiftErrorAttr())
4075         return visitStoreToSwiftError(I);
4076     }
4077 
4078     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4079       if (Alloca->isSwiftError())
4080         return visitStoreToSwiftError(I);
4081     }
4082   }
4083 
4084   SmallVector<EVT, 4> ValueVTs;
4085   SmallVector<uint64_t, 4> Offsets;
4086   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4087                   SrcV->getType(), ValueVTs, &Offsets);
4088   unsigned NumValues = ValueVTs.size();
4089   if (NumValues == 0)
4090     return;
4091 
4092   // Get the lowered operands. Note that we do this after
4093   // checking if NumValues is zero, because with zero results
4094   // the operands won't have values in the map.
4095   SDValue Src = getValue(SrcV);
4096   SDValue Ptr = getValue(PtrV);
4097 
4098   SDValue Root = getRoot();
4099   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4100   SDLoc dl = getCurSDLoc();
4101   EVT PtrVT = Ptr.getValueType();
4102   unsigned Alignment = I.getAlignment();
4103   AAMDNodes AAInfo;
4104   I.getAAMetadata(AAInfo);
4105 
4106   auto MMOFlags = MachineMemOperand::MONone;
4107   if (I.isVolatile())
4108     MMOFlags |= MachineMemOperand::MOVolatile;
4109   if (I.getMetadata(LLVMContext::MD_nontemporal) != nullptr)
4110     MMOFlags |= MachineMemOperand::MONonTemporal;
4111   MMOFlags |= TLI.getMMOFlags(I);
4112 
4113   // An aggregate store cannot wrap around the address space, so offsets to its
4114   // parts don't wrap either.
4115   SDNodeFlags Flags;
4116   Flags.setNoUnsignedWrap(true);
4117 
4118   unsigned ChainI = 0;
4119   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4120     // See visitLoad comments.
4121     if (ChainI == MaxParallelChains) {
4122       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4123                                   makeArrayRef(Chains.data(), ChainI));
4124       Root = Chain;
4125       ChainI = 0;
4126     }
4127     SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
4128                               DAG.getConstant(Offsets[i], dl, PtrVT), Flags);
4129     SDValue St = DAG.getStore(
4130         Root, dl, SDValue(Src.getNode(), Src.getResNo() + i), Add,
4131         MachinePointerInfo(PtrV, Offsets[i]), Alignment, MMOFlags, AAInfo);
4132     Chains[ChainI] = St;
4133   }
4134 
4135   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4136                                   makeArrayRef(Chains.data(), ChainI));
4137   DAG.setRoot(StoreNode);
4138 }
4139 
4140 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4141                                            bool IsCompressing) {
4142   SDLoc sdl = getCurSDLoc();
4143 
4144   auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4145                            unsigned& Alignment) {
4146     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4147     Src0 = I.getArgOperand(0);
4148     Ptr = I.getArgOperand(1);
4149     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
4150     Mask = I.getArgOperand(3);
4151   };
4152   auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4153                            unsigned& Alignment) {
4154     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4155     Src0 = I.getArgOperand(0);
4156     Ptr = I.getArgOperand(1);
4157     Mask = I.getArgOperand(2);
4158     Alignment = 0;
4159   };
4160 
4161   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4162   unsigned Alignment;
4163   if (IsCompressing)
4164     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4165   else
4166     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4167 
4168   SDValue Ptr = getValue(PtrOperand);
4169   SDValue Src0 = getValue(Src0Operand);
4170   SDValue Mask = getValue(MaskOperand);
4171 
4172   EVT VT = Src0.getValueType();
4173   if (!Alignment)
4174     Alignment = DAG.getEVTAlignment(VT);
4175 
4176   AAMDNodes AAInfo;
4177   I.getAAMetadata(AAInfo);
4178 
4179   MachineMemOperand *MMO =
4180     DAG.getMachineFunction().
4181     getMachineMemOperand(MachinePointerInfo(PtrOperand),
4182                           MachineMemOperand::MOStore,  VT.getStoreSize(),
4183                           Alignment, AAInfo);
4184   SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
4185                                          MMO, false /* Truncating */,
4186                                          IsCompressing);
4187   DAG.setRoot(StoreNode);
4188   setValue(&I, StoreNode);
4189 }
4190 
4191 // Get a uniform base for the Gather/Scatter intrinsic.
4192 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4193 // We try to represent it as a base pointer + vector of indices.
4194 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4195 // The first operand of the GEP may be a single pointer or a vector of pointers.
4196 // Example:
4197 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4198 //  or
4199 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
4200 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4201 //
4202 // When the first GEP operand is a single pointer, it is the uniform base we
4203 // are looking for. If the first operand of the GEP is a splat vector, we
4204 // extract the splat value and use it as the uniform base.
4205 // In all other cases the function returns 'false'.
4206 static bool getUniformBase(const Value* &Ptr, SDValue& Base, SDValue& Index,
4207                            SDValue &Scale, SelectionDAGBuilder* SDB) {
4208   SelectionDAG& DAG = SDB->DAG;
4209   LLVMContext &Context = *DAG.getContext();
4210 
4211   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4212   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4213   if (!GEP)
4214     return false;
4215 
4216   const Value *GEPPtr = GEP->getPointerOperand();
4217   if (!GEPPtr->getType()->isVectorTy())
4218     Ptr = GEPPtr;
4219   else if (!(Ptr = getSplatValue(GEPPtr)))
4220     return false;
4221 
4222   unsigned FinalIndex = GEP->getNumOperands() - 1;
4223   Value *IndexVal = GEP->getOperand(FinalIndex);
4224 
4225   // Ensure all the other indices are 0.
4226   for (unsigned i = 1; i < FinalIndex; ++i) {
4227     auto *C = dyn_cast<ConstantInt>(GEP->getOperand(i));
4228     if (!C || !C->isZero())
4229       return false;
4230   }
4231 
4232   // The operands of the GEP may be defined in another basic block.
4233   // In this case we'll not find nodes for the operands.
4234   if (!SDB->findValue(Ptr) || !SDB->findValue(IndexVal))
4235     return false;
4236 
4237   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4238   const DataLayout &DL = DAG.getDataLayout();
4239   Scale = DAG.getTargetConstant(DL.getTypeAllocSize(GEP->getResultElementType()),
4240                                 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4241   Base = SDB->getValue(Ptr);
4242   Index = SDB->getValue(IndexVal);
4243 
4244   if (!Index.getValueType().isVector()) {
4245     unsigned GEPWidth = GEP->getType()->getVectorNumElements();
4246     EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
4247     Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
4248   }
4249   return true;
4250 }
4251 
4252 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4253   SDLoc sdl = getCurSDLoc();
4254 
4255   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4256   const Value *Ptr = I.getArgOperand(1);
4257   SDValue Src0 = getValue(I.getArgOperand(0));
4258   SDValue Mask = getValue(I.getArgOperand(3));
4259   EVT VT = Src0.getValueType();
4260   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
4261   if (!Alignment)
4262     Alignment = DAG.getEVTAlignment(VT);
4263   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4264 
4265   AAMDNodes AAInfo;
4266   I.getAAMetadata(AAInfo);
4267 
4268   SDValue Base;
4269   SDValue Index;
4270   SDValue Scale;
4271   const Value *BasePtr = Ptr;
4272   bool UniformBase = getUniformBase(BasePtr, Base, Index, Scale, this);
4273 
4274   const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
4275   MachineMemOperand *MMO = DAG.getMachineFunction().
4276     getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
4277                          MachineMemOperand::MOStore,  VT.getStoreSize(),
4278                          Alignment, AAInfo);
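       // Without a uniform base, fall back to Base = 0, Index = the original
       // vector of pointers, and Scale = 1, so each lane's address is simply
       // its pointer value.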
4279   if (!UniformBase) {
4280     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4281     Index = getValue(Ptr);
4282     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4283   }
4284   SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index, Scale };
4285   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4286                                          Ops, MMO);
4287   DAG.setRoot(Scatter);
4288   setValue(&I, Scatter);
4289 }
4290 
4291 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4292   SDLoc sdl = getCurSDLoc();
4293 
4294   auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4295                            unsigned& Alignment) {
4296     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4297     Ptr = I.getArgOperand(0);
4298     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
4299     Mask = I.getArgOperand(2);
4300     Src0 = I.getArgOperand(3);
4301   };
4302   auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4303                            unsigned& Alignment) {
4304     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4305     Ptr = I.getArgOperand(0);
4306     Alignment = 0;
4307     Mask = I.getArgOperand(1);
4308     Src0 = I.getArgOperand(2);
4309   };
4310 
4311   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4312   unsigned Alignment;
4313   if (IsExpanding)
4314     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4315   else
4316     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4317 
4318   SDValue Ptr = getValue(PtrOperand);
4319   SDValue Src0 = getValue(Src0Operand);
4320   SDValue Mask = getValue(MaskOperand);
4321 
4322   EVT VT = Src0.getValueType();
4323   if (!Alignment)
4324     Alignment = DAG.getEVTAlignment(VT);
4325 
4326   AAMDNodes AAInfo;
4327   I.getAAMetadata(AAInfo);
4328   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4329 
4330   // Do not serialize masked loads of constant memory with anything.
4331   bool AddToChain =
4332       !AA || !AA->pointsToConstantMemory(MemoryLocation(
4333                  PtrOperand,
4334                  LocationSize::precise(
4335                      DAG.getDataLayout().getTypeStoreSize(I.getType())),
4336                  AAInfo));
4337   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4338 
4339   MachineMemOperand *MMO =
4340     DAG.getMachineFunction().
4341     getMachineMemOperand(MachinePointerInfo(PtrOperand),
4342                           MachineMemOperand::MOLoad,  VT.getStoreSize(),
4343                           Alignment, AAInfo, Ranges);
4344 
4345   SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
4346                                    ISD::NON_EXTLOAD, IsExpanding);
4347   if (AddToChain)
4348     PendingLoads.push_back(Load.getValue(1));
4349   setValue(&I, Load);
4350 }
4351 
4352 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4353   SDLoc sdl = getCurSDLoc();
4354 
4355   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4356   const Value *Ptr = I.getArgOperand(0);
4357   SDValue Src0 = getValue(I.getArgOperand(3));
4358   SDValue Mask = getValue(I.getArgOperand(2));
4359 
4360   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4361   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4362   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
4363   if (!Alignment)
4364     Alignment = DAG.getEVTAlignment(VT);
4365 
4366   AAMDNodes AAInfo;
4367   I.getAAMetadata(AAInfo);
4368   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4369 
4370   SDValue Root = DAG.getRoot();
4371   SDValue Base;
4372   SDValue Index;
4373   SDValue Scale;
4374   const Value *BasePtr = Ptr;
4375   bool UniformBase = getUniformBase(BasePtr, Base, Index, Scale, this);
4376   bool ConstantMemory = false;
4377   if (UniformBase && AA &&
4378       AA->pointsToConstantMemory(
4379           MemoryLocation(BasePtr,
4380                          LocationSize::precise(
4381                              DAG.getDataLayout().getTypeStoreSize(I.getType())),
4382                          AAInfo))) {
4383     // Do not serialize (non-volatile) loads of constant memory with anything.
4384     Root = DAG.getEntryNode();
4385     ConstantMemory = true;
4386   }
4387 
4388   MachineMemOperand *MMO =
4389     DAG.getMachineFunction().
4390     getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
4391                          MachineMemOperand::MOLoad,  VT.getStoreSize(),
4392                          Alignment, AAInfo, Ranges);
4393 
4394   if (!UniformBase) {
4395     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4396     Index = getValue(Ptr);
4397     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4398   }
4399   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4400   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4401                                        Ops, MMO);
4402 
4403   SDValue OutChain = Gather.getValue(1);
4404   if (!ConstantMemory)
4405     PendingLoads.push_back(OutChain);
4406   setValue(&I, Gather);
4407 }
4408 
4409 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4410   SDLoc dl = getCurSDLoc();
4411   AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4412   AtomicOrdering FailureOrdering = I.getFailureOrdering();
4413   SyncScope::ID SSID = I.getSyncScopeID();
4414 
4415   SDValue InChain = getRoot();
4416 
4417   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
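       // cmpxchg produces the loaded value plus an i1 success flag, hence the
       // (MemVT, i1, Other) value list below; MVT::Other is the chain.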
4418   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4419 
4420   auto Alignment = DAG.getEVTAlignment(MemVT);
4421 
4422   auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
4423   if (I.isVolatile())
4424     Flags |= MachineMemOperand::MOVolatile;
4425   Flags |= DAG.getTargetLoweringInfo().getMMOFlags(I);
4426 
4427   MachineFunction &MF = DAG.getMachineFunction();
4428   MachineMemOperand *MMO =
4429     MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
4430                             Flags, MemVT.getStoreSize(), Alignment,
4431                             AAMDNodes(), nullptr, SSID, SuccessOrdering,
4432                             FailureOrdering);
4433 
4434   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4435                                    dl, MemVT, VTs, InChain,
4436                                    getValue(I.getPointerOperand()),
4437                                    getValue(I.getCompareOperand()),
4438                                    getValue(I.getNewValOperand()), MMO);
4439 
4440   SDValue OutChain = L.getValue(2);
4441 
4442   setValue(&I, L);
4443   DAG.setRoot(OutChain);
4444 }
4445 
4446 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4447   SDLoc dl = getCurSDLoc();
4448   ISD::NodeType NT;
4449   switch (I.getOperation()) {
4450   default: llvm_unreachable("Unknown atomicrmw operation");
4451   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4452   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
4453   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
4454   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
4455   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4456   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
4457   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
4458   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
4459   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
4460   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4461   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4462   case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4463   case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4464   }
4465   AtomicOrdering Ordering = I.getOrdering();
4466   SyncScope::ID SSID = I.getSyncScopeID();
4467 
4468   SDValue InChain = getRoot();
4469 
4470   auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4471   auto Alignment = DAG.getEVTAlignment(MemVT);
4472 
4473   auto Flags = MachineMemOperand::MOLoad |  MachineMemOperand::MOStore;
4474   if (I.isVolatile())
4475     Flags |= MachineMemOperand::MOVolatile;
4476   Flags |= DAG.getTargetLoweringInfo().getMMOFlags(I);
4477 
4478   MachineFunction &MF = DAG.getMachineFunction();
4479   MachineMemOperand *MMO =
4480     MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
4481                             MemVT.getStoreSize(), Alignment, AAMDNodes(),
4482                             nullptr, SSID, Ordering);
4483 
4484   SDValue L =
4485     DAG.getAtomic(NT, dl, MemVT, InChain,
4486                   getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4487                   MMO);
4488 
4489   SDValue OutChain = L.getValue(1);
4490 
4491   setValue(&I, L);
4492   DAG.setRoot(OutChain);
4493 }
4494 
4495 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4496   SDLoc dl = getCurSDLoc();
4497   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4498   SDValue Ops[3];
4499   Ops[0] = getRoot();
4500   Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
4501                            TLI.getFenceOperandTy(DAG.getDataLayout()));
4502   Ops[2] = DAG.getConstant(I.getSyncScopeID(), dl,
4503                            TLI.getFenceOperandTy(DAG.getDataLayout()));
4504   DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4505 }
4506 
4507 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4508   SDLoc dl = getCurSDLoc();
4509   AtomicOrdering Order = I.getOrdering();
4510   SyncScope::ID SSID = I.getSyncScopeID();
4511 
4512   SDValue InChain = getRoot();
4513 
4514   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4515   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4516 
4517   if (!TLI.supportsUnalignedAtomics() &&
4518       I.getAlignment() < VT.getStoreSize())
4519     report_fatal_error("Cannot generate unaligned atomic load");
4520 
4521   auto Flags = MachineMemOperand::MOLoad;
4522   if (I.isVolatile())
4523     Flags |= MachineMemOperand::MOVolatile;
4524   Flags |= TLI.getMMOFlags(I);
4525 
4526   MachineMemOperand *MMO =
4527       DAG.getMachineFunction().
4528       getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
4529                            Flags, VT.getStoreSize(),
4530                            I.getAlignment() ? I.getAlignment() :
4531                                               DAG.getEVTAlignment(VT),
4532                            AAMDNodes(), nullptr, SSID, Order);
4533 
4534   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4535   SDValue L =
4536       DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
4537                     getValue(I.getPointerOperand()), MMO);
4538 
4539   SDValue OutChain = L.getValue(1);
4540 
4541   setValue(&I, L);
4542   DAG.setRoot(OutChain);
4543 }
4544 
4545 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4546   SDLoc dl = getCurSDLoc();
4547 
4548   AtomicOrdering Ordering = I.getOrdering();
4549   SyncScope::ID SSID = I.getSyncScopeID();
4550 
4551   SDValue InChain = getRoot();
4552 
4553   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4554   EVT VT =
4555       TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4556 
4557   if (I.getAlignment() < VT.getStoreSize())
4558     report_fatal_error("Cannot generate unaligned atomic store");
4559 
4560   auto Flags = MachineMemOperand::MOStore;
4561   if (I.isVolatile())
4562     Flags |= MachineMemOperand::MOVolatile;
4563   Flags |= TLI.getMMOFlags(I);
4564 
4565   MachineFunction &MF = DAG.getMachineFunction();
4566   MachineMemOperand *MMO =
4567     MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
4568                             VT.getStoreSize(), I.getAlignment(), AAMDNodes(),
4569                             nullptr, SSID, Ordering);
4570   SDValue OutChain =
4571     DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT, InChain,
4572               getValue(I.getPointerOperand()), getValue(I.getValueOperand()),
4573               MMO);
4574 
4575 
4576   DAG.setRoot(OutChain);
4577 }
4578 
4579 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4580 /// node.
4581 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4582                                                unsigned Intrinsic) {
4583   // Ignore the callsite's attributes. A specific call site may be marked with
4584   // readnone, but the lowering code will expect the chain based on the
4585   // definition.
4586   const Function *F = I.getCalledFunction();
4587   bool HasChain = !F->doesNotAccessMemory();
4588   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4589 
4590   // Build the operand list.
4591   SmallVector<SDValue, 8> Ops;
4592   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4593     if (OnlyLoad) {
4594       // We don't need to serialize loads against other loads.
4595       Ops.push_back(DAG.getRoot());
4596     } else {
4597       Ops.push_back(getRoot());
4598     }
4599   }
4600 
4601   // Info is set by getTgtMemIntrinsic.
4602   TargetLowering::IntrinsicInfo Info;
4603   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4604   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4605                                                DAG.getMachineFunction(),
4606                                                Intrinsic);
4607 
4608   // Add the intrinsic ID as an integer operand if it's not a target intrinsic, or if it still uses the generic INTRINSIC_VOID / INTRINSIC_W_CHAIN opcodes.
4609   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4610       Info.opc == ISD::INTRINSIC_W_CHAIN)
4611     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4612                                         TLI.getPointerTy(DAG.getDataLayout())));
4613 
4614   // Add all operands of the call to the operand list.
4615   for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4616     SDValue Op = getValue(I.getArgOperand(i));
4617     Ops.push_back(Op);
4618   }
4619 
4620   SmallVector<EVT, 4> ValueVTs;
4621   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4622 
4623   if (HasChain)
4624     ValueVTs.push_back(MVT::Other);
4625 
4626   SDVTList VTs = DAG.getVTList(ValueVTs);
4627 
4628   // Create the node.
4629   SDValue Result;
4630   if (IsTgtIntrinsic) {
4631     // This is a target intrinsic that touches memory.
4632     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs,
4633       Ops, Info.memVT,
4634       MachinePointerInfo(Info.ptrVal, Info.offset), Info.align,
4635       Info.flags, Info.size);
4636   } else if (!HasChain) {
4637     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4638   } else if (!I.getType()->isVoidTy()) {
4639     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4640   } else {
4641     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4642   }
4643 
4644   if (HasChain) {
4645     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4646     if (OnlyLoad)
4647       PendingLoads.push_back(Chain);
4648     else
4649       DAG.setRoot(Chain);
4650   }
4651 
4652   if (!I.getType()->isVoidTy()) {
4653     if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4654       EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4655       Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4656     } else
4657       Result = lowerRangeToAssertZExt(DAG, I, Result);
4658 
4659     setValue(&I, Result);
4660   }
4661 }
4662 
4663 /// GetSignificand - Get the significand and build it into a floating-point
4664 /// number with exponent of 1:
4665 ///
4666 ///   Op = (Op & 0x007fffff) | 0x3f800000;
4667 /// where Op is the i32 bit pattern of a floating-point value.
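     /// For example (a sketch), for 10.0f (bits 0x41200000) this yields bits
     /// 0x3fa00000, i.e. 1.25f.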
4668 /// where Op is the hexadecimal representation of floating point value.
4669 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4670   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4671                            DAG.getConstant(0x007fffff, dl, MVT::i32));
4672   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4673                            DAG.getConstant(0x3f800000, dl, MVT::i32));
4674   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4675 }
4676 
4677 /// GetExponent - Get the exponent:
4678 ///
4679 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4680 ///
4681 /// where Op is the i32 bit pattern of a floating-point value.
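     /// For example (a sketch), for 10.0f (bits 0x41200000) this computes
     /// ((0x41200000 & 0x7f800000) >> 23) - 127 == 130 - 127, i.e. 3.0f.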
4682 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4683                            const TargetLowering &TLI, const SDLoc &dl) {
4684   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4685                            DAG.getConstant(0x7f800000, dl, MVT::i32));
4686   SDValue t1 = DAG.getNode(
4687       ISD::SRL, dl, MVT::i32, t0,
4688       DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4689   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4690                            DAG.getConstant(127, dl, MVT::i32));
4691   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4692 }
4693 
4694 /// getF32Constant - Get 32-bit floating point constant.
4695 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4696                               const SDLoc &dl) {
4697   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4698                            MVT::f32);
4699 }
4700 
4701 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4702                                        SelectionDAG &DAG) {
4703   // TODO: What fast-math-flags should be set on the floating-point nodes?
4704 
4705   //   IntegerPartOfX = (int32_t)t0;
4706   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4707 
4708   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
4709   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4710   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4711 
4712   //   IntegerPartOfX <<= 23;
4713   IntegerPartOfX = DAG.getNode(
4714       ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4715       DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4716                                   DAG.getDataLayout())));
4717 
4718   SDValue TwoToFractionalPartOfX;
4719   if (LimitFloatPrecision <= 6) {
4720     // For floating-point precision of 6:
4721     //
4722     //   TwoToFractionalPartOfX =
4723     //     0.997535578f +
4724     //       (0.735607626f + 0.252464424f * x) * x;
4725     //
4726     // error 0.0144103317, which is 6 bits
4727     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4728                              getF32Constant(DAG, 0x3e814304, dl));
4729     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4730                              getF32Constant(DAG, 0x3f3c50c8, dl));
4731     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4732     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4733                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
4734   } else if (LimitFloatPrecision <= 12) {
4735     // For floating-point precision of 12:
4736     //
4737     //   TwoToFractionalPartOfX =
4738     //     0.999892986f +
4739     //       (0.696457318f +
4740     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4741     //
4742     // error 0.000107046256, which is 13 to 14 bits
4743     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4744                              getF32Constant(DAG, 0x3da235e3, dl));
4745     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4746                              getF32Constant(DAG, 0x3e65b8f3, dl));
4747     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4748     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4749                              getF32Constant(DAG, 0x3f324b07, dl));
4750     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4751     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4752                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
4753   } else { // LimitFloatPrecision <= 18
4754     // For floating-point precision of 18:
4755     //
4756     //   TwoToFractionalPartOfX =
4757     //     0.999999982f +
4758     //       (0.693148872f +
4759     //         (0.240227044f +
4760     //           (0.554906021e-1f +
4761     //             (0.961591928e-2f +
4762     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4763     // error 2.47208000*10^(-7), which is better than 18 bits
4764     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4765                              getF32Constant(DAG, 0x3924b03e, dl));
4766     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4767                              getF32Constant(DAG, 0x3ab24b87, dl));
4768     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4769     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4770                              getF32Constant(DAG, 0x3c1d8c17, dl));
4771     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4772     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4773                              getF32Constant(DAG, 0x3d634a1d, dl));
4774     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4775     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4776                              getF32Constant(DAG, 0x3e75fe14, dl));
4777     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4778     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4779                               getF32Constant(DAG, 0x3f317234, dl));
4780     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4781     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4782                                          getF32Constant(DAG, 0x3f800000, dl));
4783   }
4784 
4785   // Add the exponent into the result in integer domain.
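       // Since TwoToFractionalPartOfX lies in [1, 2), its IEEE-754 exponent
       // field is biased 127, so adding IntegerPartOfX << 23 to its bit
       // pattern multiplies the value by exactly 2^IntegerPartOfX.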
4786   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4787   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4788                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4789 }
4790 
4791 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
4792 /// limited-precision mode.
4793 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4794                          const TargetLowering &TLI) {
4795   if (Op.getValueType() == MVT::f32 &&
4796       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4797 
4798     // Put the exponent in the right bit position for later addition to the
4799     // final result:
4800     //
4801     //   #define LOG2OFe 1.4426950f
4802     //   t0 = Op * LOG2OFe
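         //
         // i.e. exp(Op) == 2^(Op * log2(e)), which getLimitedPrecisionExp2
         // then evaluates as 2^IntegerPartOfX * 2^FractionalPartOfX.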
4803 
4804     // TODO: What fast-math-flags should be set here?
4805     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4806                              getF32Constant(DAG, 0x3fb8aa3b, dl));
4807     return getLimitedPrecisionExp2(t0, dl, DAG);
4808   }
4809 
4810   // No special expansion.
4811   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4812 }
4813 
4814 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4815 /// limited-precision mode.
4816 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4817                          const TargetLowering &TLI) {
4818   // TODO: What fast-math-flags should be set on the floating-point nodes?
4819 
4820   if (Op.getValueType() == MVT::f32 &&
4821       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4822     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4823 
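         // log(x) == log(2^Exp * X) == Exp*log(2) + log(X), with X in [1, 2);
         // the two terms are computed separately below.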
4824     // Scale the exponent by log(2) [0.69314718f].
4825     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4826     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4827                                         getF32Constant(DAG, 0x3f317218, dl));
4828 
4829     // Get the significand and build it into a floating-point number with
4830     // exponent of 1.
4831     SDValue X = GetSignificand(DAG, Op1, dl);
4832 
4833     SDValue LogOfMantissa;
4834     if (LimitFloatPrecision <= 6) {
4835       // For floating-point precision of 6:
4836       //
4837       //   LogofMantissa =
4838       //     -1.1609546f +
4839       //       (1.4034025f - 0.23903021f * x) * x;
4840       //
4841       // error 0.0034276066, which is better than 8 bits
4842       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4843                                getF32Constant(DAG, 0xbe74c456, dl));
4844       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4845                                getF32Constant(DAG, 0x3fb3a2b1, dl));
4846       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4847       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4848                                   getF32Constant(DAG, 0x3f949a29, dl));
4849     } else if (LimitFloatPrecision <= 12) {
4850       // For floating-point precision of 12:
4851       //
4852       //   LogOfMantissa =
4853       //     -1.7417939f +
4854       //       (2.8212026f +
4855       //         (-1.4699568f +
4856       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4857       //
4858       // error 0.000061011436, which is 14 bits
4859       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4860                                getF32Constant(DAG, 0xbd67b6d6, dl));
4861       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4862                                getF32Constant(DAG, 0x3ee4f4b8, dl));
4863       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4864       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4865                                getF32Constant(DAG, 0x3fbc278b, dl));
4866       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4867       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4868                                getF32Constant(DAG, 0x40348e95, dl));
4869       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4870       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4871                                   getF32Constant(DAG, 0x3fdef31a, dl));
4872     } else { // LimitFloatPrecision <= 18
4873       // For floating-point precision of 18:
4874       //
4875       //   LogOfMantissa =
4876       //     -2.1072184f +
4877       //       (4.2372794f +
4878       //         (-3.7029485f +
4879       //           (2.2781945f +
4880       //             (-0.87823314f +
4881       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4882       //
4883       // error 0.0000023660568, which is better than 18 bits
4884       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4885                                getF32Constant(DAG, 0xbc91e5ac, dl));
4886       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4887                                getF32Constant(DAG, 0x3e4350aa, dl));
4888       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4889       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4890                                getF32Constant(DAG, 0x3f60d3e3, dl));
4891       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4892       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4893                                getF32Constant(DAG, 0x4011cdf0, dl));
4894       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4895       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4896                                getF32Constant(DAG, 0x406cfd1c, dl));
4897       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4898       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4899                                getF32Constant(DAG, 0x408797cb, dl));
4900       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4901       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4902                                   getF32Constant(DAG, 0x4006dcab, dl));
4903     }
4904 
4905     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4906   }
4907 
4908   // No special expansion.
4909   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4910 }
4911 
4912 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4913 /// limited-precision mode.
4914 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4915                           const TargetLowering &TLI) {
4916   // TODO: What fast-math-flags should be set on the floating-point nodes?
4917 
4918   if (Op.getValueType() == MVT::f32 &&
4919       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4920     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4921 
4922     // Get the exponent.
4923     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
4924 
4925     // Get the significand and build it into a floating-point number with
4926     // exponent of 1.
4927     SDValue X = GetSignificand(DAG, Op1, dl);
4928 
4929     // Different possible minimax approximations of significand in
4930     // floating-point for various degrees of accuracy over [1,2].
4931     SDValue Log2ofMantissa;
4932     if (LimitFloatPrecision <= 6) {
4933       // For floating-point precision of 6:
4934       //
4935       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
4936       //
4937       // error 0.0049451742, which is more than 7 bits
4938       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4939                                getF32Constant(DAG, 0xbeb08fe0, dl));
4940       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4941                                getF32Constant(DAG, 0x40019463, dl));
4942       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4943       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4944                                    getF32Constant(DAG, 0x3fd6633d, dl));
4945     } else if (LimitFloatPrecision <= 12) {
4946       // For floating-point precision of 12:
4947       //
4948       //   Log2ofMantissa =
4949       //     -2.51285454f +
4950       //       (4.07009056f +
4951       //         (-2.12067489f +
4952       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
4953       //
4954       // error 0.0000876136000, which is better than 13 bits
4955       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4956                                getF32Constant(DAG, 0xbda7262e, dl));
4957       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4958                                getF32Constant(DAG, 0x3f25280b, dl));
4959       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4960       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4961                                getF32Constant(DAG, 0x4007b923, dl));
4962       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4963       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4964                                getF32Constant(DAG, 0x40823e2f, dl));
4965       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4966       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4967                                    getF32Constant(DAG, 0x4020d29c, dl));
4968     } else { // LimitFloatPrecision <= 18
4969       // For floating-point precision of 18:
4970       //
4971       //   Log2ofMantissa =
4972       //     -3.0400495f +
4973       //       (6.1129976f +
4974       //         (-5.3420409f +
4975       //           (3.2865683f +
4976       //             (-1.2669343f +
4977       //               (0.27515199f -
4978       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
4979       //
4980       // error 0.0000018516, which is better than 18 bits
4981       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4982                                getF32Constant(DAG, 0xbcd2769e, dl));
4983       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4984                                getF32Constant(DAG, 0x3e8ce0b9, dl));
4985       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4986       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4987                                getF32Constant(DAG, 0x3fa22ae7, dl));
4988       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4989       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4990                                getF32Constant(DAG, 0x40525723, dl));
4991       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4992       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4993                                getF32Constant(DAG, 0x40aaf200, dl));
4994       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4995       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4996                                getF32Constant(DAG, 0x40c39dad, dl));
4997       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4998       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4999                                    getF32Constant(DAG, 0x4042902c, dl));
5000     }
5001 
5002     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5003   }
5004 
5005   // No special expansion.
5006   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
5007 }
5008 
5009 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5010 /// limited-precision mode.
5011 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5012                            const TargetLowering &TLI) {
5013   // TODO: What fast-math-flags should be set on the floating-point nodes?
5014 
5015   if (Op.getValueType() == MVT::f32 &&
5016       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5017     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5018 
5019     // Scale the exponent by log10(2) [0.30102999f].
5020     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5021     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5022                                         getF32Constant(DAG, 0x3e9a209a, dl));
5023 
5024     // Get the significand and build it into a floating-point number with
5025     // exponent of 1.
5026     SDValue X = GetSignificand(DAG, Op1, dl);
5027 
5028     SDValue Log10ofMantissa;
5029     if (LimitFloatPrecision <= 6) {
5030       // For floating-point precision of 6:
5031       //
5032       //   Log10ofMantissa =
5033       //     -0.50419619f +
5034       //       (0.60948995f - 0.10380950f * x) * x;
5035       //
5036       // error 0.0014886165, which is 6 bits
5037       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5038                                getF32Constant(DAG, 0xbdd49a13, dl));
5039       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5040                                getF32Constant(DAG, 0x3f1c0789, dl));
5041       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5042       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5043                                     getF32Constant(DAG, 0x3f011300, dl));
5044     } else if (LimitFloatPrecision <= 12) {
5045       // For floating-point precision of 12:
5046       //
5047       //   Log10ofMantissa =
5048       //     -0.64831180f +
5049       //       (0.91751397f +
5050       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5051       //
5052       // error 0.00019228036, which is better than 12 bits
5053       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5054                                getF32Constant(DAG, 0x3d431f31, dl));
5055       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5056                                getF32Constant(DAG, 0x3ea21fb2, dl));
5057       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5058       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5059                                getF32Constant(DAG, 0x3f6ae232, dl));
5060       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5061       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5062                                     getF32Constant(DAG, 0x3f25f7c3, dl));
5063     } else { // LimitFloatPrecision <= 18
5064       // For floating-point precision of 18:
5065       //
5066       //   Log10ofMantissa =
5067       //     -0.84299375f +
5068       //       (1.5327582f +
5069       //         (-1.0688956f +
5070       //           (0.49102474f +
5071       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5072       //
5073       // error 0.0000037995730, which is better than 18 bits
5074       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5075                                getF32Constant(DAG, 0x3c5d51ce, dl));
5076       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5077                                getF32Constant(DAG, 0x3e00685a, dl));
5078       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5079       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5080                                getF32Constant(DAG, 0x3efb6798, dl));
5081       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5082       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5083                                getF32Constant(DAG, 0x3f88d192, dl));
5084       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5085       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5086                                getF32Constant(DAG, 0x3fc4316c, dl));
5087       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5088       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5089                                     getF32Constant(DAG, 0x3f57ce70, dl));
5090     }
5091 
5092     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5093   }
5094 
5095   // No special expansion.
5096   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
5097 }
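
// Illustrative numeric check of the decomposition above (a sketch, not
// emitted code): the expansion uses log10(2^e * m) = e * log10(2) + log10(m)
// with the significand m in [1, 2). For x = 8.0f = 2^3 * 1.0, LogOfExponent
// (computed earlier in this function) is 3 * 0.30103f ~= 0.90309f, and the
// precision-6 polynomial at m = 1.0 evaluates to ~0.0015 (its stated error
// bound), so the sum approximates log10(8.0f) to about 6 bits.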
5098 
5099 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5100 /// limited-precision mode.
5101 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5102                           const TargetLowering &TLI) {
5103   if (Op.getValueType() == MVT::f32 &&
5104       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5105     return getLimitedPrecisionExp2(Op, dl, DAG);
5106 
5107   // No special expansion.
5108   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
5109 }
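
// A minimal usage sketch (assuming the cl::opt spelled "limit-float-precision"
// defined near the top of this file): the limited-precision path above is only
// taken for f32 when the compiler is invoked with something like
//
//   llc -limit-float-precision=12 input.ll
//
// in which case exp2 is approximated by a polynomial in
// getLimitedPrecisionExp2 instead of being lowered to an ISD::FEXP2 node.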
5110 
5111 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
5112 /// limited-precision mode when the base is 10.0f.
5113 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5114                          SelectionDAG &DAG, const TargetLowering &TLI) {
5115   bool IsExp10 = false;
5116   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5117       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5118     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5119       APFloat Ten(10.0f);
5120       IsExp10 = LHSC->isExactlyValue(Ten);
5121     }
5122   }
5123 
5124   // TODO: What fast-math-flags should be set on the FMUL node?
5125   if (IsExp10) {
5126     // Put the exponent in the right bit position for later addition to the
5127     // final result:
5128     //
5129     //   #define LOG2OF10 3.3219281f
5130     //   t0 = Op * LOG2OF10;
5131     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5132                              getF32Constant(DAG, 0x40549a78, dl));
5133     return getLimitedPrecisionExp2(t0, dl, DAG);
5134   }
5135 
5136   // No special expansion.
5137   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
5138 }
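
// Worked example of the special case above (a sketch): pow(10.0f, x) is
// rewritten as exp2(x * log2(10)), since 10^x == 2^(x * log2(10)). The
// constant 0x40549a78 is the f32 encoding of 3.3219281f ~= log2(10), so
// e.g. pow(10.0f, 2.0f) becomes exp2(6.6438562f) ~= 100.0f.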
5139 
5140 /// ExpandPowI - Expand a llvm.powi intrinsic.
5141 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5142                           SelectionDAG &DAG) {
5143   // If RHS is a constant, we can expand this out to a multiplication tree;
5144   // otherwise we end up lowering to a call to __powidf2 (for example).  When
5145   // optimizing for size, we only do the expansion if it would produce a small
5146   // number of multiplies; otherwise we fall back to the libcall.
5147   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5148     // Get the exponent as a positive value.
5149     unsigned Val = RHSC->getSExtValue();
5150     if ((int)Val < 0) Val = -Val;
5151 
5152     // powi(x, 0) -> 1.0
5153     if (Val == 0)
5154       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5155 
5156     const Function &F = DAG.getMachineFunction().getFunction();
5157     if (!F.optForSize() ||
5158         // If optimizing for size, don't insert too many multiplies.
5159         // This inserts up to 5 multiplies.
5160         countPopulation(Val) + Log2_32(Val) < 7) {
5161       // We use the simple binary decomposition method to generate the multiply
5162       // sequence.  There are more optimal ways to do this (for example,
5163       // powi(x,15) generates one more multiply than it should), but this has
5164       // the benefit of being both really simple and much better than a libcall.
5165       SDValue Res;  // Logically starts equal to 1.0
5166       SDValue CurSquare = LHS;
5167       // TODO: Intrinsics should have fast-math-flags that propagate to these
5168       // nodes.
5169       while (Val) {
5170         if (Val & 1) {
5171           if (Res.getNode())
5172             Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5173           else
5174             Res = CurSquare;  // 1.0*CurSquare.
5175         }
5176 
5177         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5178                                 CurSquare, CurSquare);
5179         Val >>= 1;
5180       }
5181 
5182       // If the original was negative, invert the result, producing 1/(x*x*x).
5183       if (RHSC->getSExtValue() < 0)
5184         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5185                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5186       return Res;
5187     }
5188   }
5189 
5190   // Otherwise, expand to a libcall.
5191   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5192 }
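
// Worked example of the binary decomposition above: powi(x, 13) has
// Val = 13 = 0b1101, so the loop visits the set bits from least to most
// significant while squaring CurSquare each iteration:
//
//   bit 0 set: Res = x
//   bit 2 set: Res = x * x^4
//   bit 3 set: Res = (x * x^4) * x^8      // == x^13
//
// i.e. three useful squarings plus two combining multiplies (the squaring
// computed on the last iteration is dead). For powi(x, -13) the final FDIV
// then produces 1.0 / x^13.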
5193 
5194 // getUnderlyingArgReg - Find underlying register used for a truncated or
5195 // bitcasted argument.
5196 static unsigned getUnderlyingArgReg(const SDValue &N) {
5197   switch (N.getOpcode()) {
5198   case ISD::CopyFromReg:
5199     return cast<RegisterSDNode>(N.getOperand(1))->getReg();
5200   case ISD::BITCAST:
5201   case ISD::AssertZext:
5202   case ISD::AssertSext:
5203   case ISD::TRUNCATE:
5204     return getUnderlyingArgReg(N.getOperand(0));
5205   default:
5206     return 0;
5207   }
5208 }
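
// Illustrative sketch of what getUnderlyingArgReg looks through (the value
// names are hypothetical):
//
//   t1: i64 = CopyFromReg %vreg0
//   t2: i64 = AssertZext t1
//   t3: i32 = TRUNCATE t2
//
// getUnderlyingArgReg(t3) peels the TRUNCATE and AssertZext and returns
// %vreg0; any other opcode at the root yields 0, meaning "no register".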
5209 
5210 /// If the DbgValueInst is a dbg_value of a function argument, create the
5211 /// corresponding DBG_VALUE machine instruction for it now.  At the end of
5212 /// instruction selection, it will be inserted into the entry BB.
5213 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5214     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5215     DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
5216   const Argument *Arg = dyn_cast<Argument>(V);
5217   if (!Arg)
5218     return false;
5219 
5220   if (!IsDbgDeclare) {
5221     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5222     // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5223     // the entry block.
5224     bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5225     if (!IsInEntryBlock)
5226       return false;
5227 
5228     // ArgDbgValues are hoisted to the beginning of the entry block.  So we
5229     // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
5230     // variable that is also a parameter.
5231     //
5232     // Although, if we are at the top of the entry block already, we can still
5233     // emit using ArgDbgValue. This might catch some situations when the
5234     // dbg.value refers to an argument that isn't used in the entry block, so
5235     // any CopyToReg node would be optimized out and the only way to express
5236     // this DBG_VALUE is by using the physical reg (or FI) as done in this
5237     // method.
5241     bool VariableIsFunctionInputArg = Variable->isParameter() &&
5242         !DL->getInlinedAt();
5243     bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5244     if (!IsInPrologue && !VariableIsFunctionInputArg)
5245       return false;
5246 
5247     // Here we assume that a function argument on the IR level can only be
5248     // used to describe one input parameter on the source level. If we, for
5249     // example, have source code like this
5250     //
5251     //    struct A { long x, y; };
5252     //    void foo(struct A a, long b) {
5253     //      ...
5254     //      b = a.x;
5255     //      ...
5256     //    }
5257     //
5258     // and IR like this
5259     //
5260     //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
5261     //  entry:
5262     //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5263     //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5264     //    call void @llvm.dbg.value(metadata i32 %b, "b",
5265     //    ...
5266     //    call void @llvm.dbg.value(metadata i32 %a1, "b"
5267     //    ...
5268     //
5269     // then the last dbg.value is describing a parameter "b" using a value that
5270     // is an argument. But since we have already used %a1 to describe a
5271     // parameter, we should not handle that last dbg.value here (that would
5272     // result in an incorrect hoisting of the DBG_VALUE to the function entry).
5273     // Notice that we allow one dbg.value per IR-level argument, to accommodate
5274     // the situation with fragments above.
5275     if (VariableIsFunctionInputArg) {
5276       unsigned ArgNo = Arg->getArgNo();
5277       if (ArgNo >= FuncInfo.DescribedArgs.size())
5278         FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5279       else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5280         return false;
5281       FuncInfo.DescribedArgs.set(ArgNo);
5282     }
5283   }
5284 
5285   MachineFunction &MF = DAG.getMachineFunction();
5286   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5287 
5288   bool IsIndirect = false;
5289   Optional<MachineOperand> Op;
5290   // Some arguments' frame index is recorded during argument lowering.
5291   int FI = FuncInfo.getArgumentFrameIndex(Arg);
5292   if (FI != std::numeric_limits<int>::max())
5293     Op = MachineOperand::CreateFI(FI);
5294 
5295   if (!Op && N.getNode()) {
5296     unsigned Reg = getUnderlyingArgReg(N);
5297     if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
5298       MachineRegisterInfo &RegInfo = MF.getRegInfo();
5299       unsigned PR = RegInfo.getLiveInPhysReg(Reg);
5300       if (PR)
5301         Reg = PR;
5302     }
5303     if (Reg) {
5304       Op = MachineOperand::CreateReg(Reg, false);
5305       IsIndirect = IsDbgDeclare;
5306     }
5307   }
5308 
5309   if (!Op && N.getNode())
5310     // Check if frame index is available.
5311     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
5312       if (FrameIndexSDNode *FINode =
5313           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5314         Op = MachineOperand::CreateFI(FINode->getIndex());
5315 
5316   if (!Op) {
5317     // Check if ValueMap has reg number.
5318     DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
5319     if (VMI != FuncInfo.ValueMap.end()) {
5320       const auto &TLI = DAG.getTargetLoweringInfo();
5321       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5322                        V->getType(), getABIRegCopyCC(V));
5323       if (RFV.occupiesMultipleRegs()) {
5324         unsigned Offset = 0;
5325         for (auto RegAndSize : RFV.getRegsAndSizes()) {
5326           Op = MachineOperand::CreateReg(RegAndSize.first, false);
5327           auto FragmentExpr = DIExpression::createFragmentExpression(
5328               Expr, Offset, RegAndSize.second);
5329           if (!FragmentExpr)
5330             continue;
5331           FuncInfo.ArgDbgValues.push_back(
5332               BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
5333                       Op->getReg(), Variable, *FragmentExpr));
5334           Offset += RegAndSize.second;
5335         }
5336         return true;
5337       }
5338       Op = MachineOperand::CreateReg(VMI->second, false);
5339       IsIndirect = IsDbgDeclare;
5340     }
5341   }
5342 
5343   if (!Op)
5344     return false;
5345 
5346   assert(Variable->isValidLocationForIntrinsic(DL) &&
5347          "Expected inlined-at fields to agree");
5348   IsIndirect = (Op->isReg()) ? IsIndirect : true;
5349   FuncInfo.ArgDbgValues.push_back(
5350       BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
5351               *Op, Variable, Expr));
5352 
5353   return true;
5354 }
5355 
5356 /// Return the appropriate SDDbgValue based on N.
5357 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5358                                              DILocalVariable *Variable,
5359                                              DIExpression *Expr,
5360                                              const DebugLoc &dl,
5361                                              unsigned DbgSDNodeOrder) {
5362   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5363     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5364     // stack slot locations.
5365     //
5366     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5367     // debug values here after optimization:
5368     //
5369     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
5370     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5371     //
5372     // Both describe the direct values of their associated variables.
5373     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5374                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5375   }
5376   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5377                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5378 }
5379 
5380 // VisualStudio defines setjmp as _setjmp
5381 #if defined(_MSC_VER) && defined(setjmp) && \
5382                          !defined(setjmp_undefined_for_msvc)
5383 #  pragma push_macro("setjmp")
5384 #  undef setjmp
5385 #  define setjmp_undefined_for_msvc
5386 #endif
5387 
5388 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5389   switch (Intrinsic) {
5390   case Intrinsic::smul_fix:
5391     return ISD::SMULFIX;
5392   case Intrinsic::umul_fix:
5393     return ISD::UMULFIX;
5394   default:
5395     llvm_unreachable("Unhandled fixed point intrinsic");
5396   }
5397 }
5398 
5399 /// Lower the call to the specified intrinsic function. If we want to emit this
5400 /// as a call to a named external function, return the name. Otherwise, lower it
5401 /// and return null.
5402 const char *
5403 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
5404   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5405   SDLoc sdl = getCurSDLoc();
5406   DebugLoc dl = getCurDebugLoc();
5407   SDValue Res;
5408 
5409   switch (Intrinsic) {
5410   default:
5411     // By default, turn this into a target intrinsic node.
5412     visitTargetIntrinsic(I, Intrinsic);
5413     return nullptr;
5414   case Intrinsic::vastart:  visitVAStart(I); return nullptr;
5415   case Intrinsic::vaend:    visitVAEnd(I); return nullptr;
5416   case Intrinsic::vacopy:   visitVACopy(I); return nullptr;
5417   case Intrinsic::returnaddress:
5418     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5419                              TLI.getPointerTy(DAG.getDataLayout()),
5420                              getValue(I.getArgOperand(0))));
5421     return nullptr;
5422   case Intrinsic::addressofreturnaddress:
5423     setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5424                              TLI.getPointerTy(DAG.getDataLayout())));
5425     return nullptr;
5426   case Intrinsic::sponentry:
5427     setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl,
5428                              TLI.getPointerTy(DAG.getDataLayout())));
5429     return nullptr;
5430   case Intrinsic::frameaddress:
5431     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5432                              TLI.getPointerTy(DAG.getDataLayout()),
5433                              getValue(I.getArgOperand(0))));
5434     return nullptr;
5435   case Intrinsic::read_register: {
5436     Value *Reg = I.getArgOperand(0);
5437     SDValue Chain = getRoot();
5438     SDValue RegName =
5439         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5440     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5441     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5442       DAG.getVTList(VT, MVT::Other), Chain, RegName);
5443     setValue(&I, Res);
5444     DAG.setRoot(Res.getValue(1));
5445     return nullptr;
5446   }
5447   case Intrinsic::write_register: {
5448     Value *Reg = I.getArgOperand(0);
5449     Value *RegValue = I.getArgOperand(1);
5450     SDValue Chain = getRoot();
5451     SDValue RegName =
5452         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5453     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
5454                             RegName, getValue(RegValue)));
5455     return nullptr;
5456   }
5457   case Intrinsic::setjmp:
5458     return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
5459   case Intrinsic::longjmp:
5460     return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
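  // The string-indexing trick above returns "_setjmp"/"_longjmp" when the
  // target uses the underscore-prefixed variants, and skips the leading
  // character to return "setjmp"/"longjmp" otherwise.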
5461   case Intrinsic::memcpy: {
5462     const auto &MCI = cast<MemCpyInst>(I);
5463     SDValue Op1 = getValue(I.getArgOperand(0));
5464     SDValue Op2 = getValue(I.getArgOperand(1));
5465     SDValue Op3 = getValue(I.getArgOperand(2));
5466     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5467     unsigned DstAlign = std::max<unsigned>(MCI.getDestAlignment(), 1);
5468     unsigned SrcAlign = std::max<unsigned>(MCI.getSourceAlignment(), 1);
5469     unsigned Align = MinAlign(DstAlign, SrcAlign);
5470     bool isVol = MCI.isVolatile();
5471     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5472     // FIXME: Support passing different dest/src alignments to the memcpy DAG
5473     // node.
5474     SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5475                                false, isTC,
5476                                MachinePointerInfo(I.getArgOperand(0)),
5477                                MachinePointerInfo(I.getArgOperand(1)));
5478     updateDAGForMaybeTailCall(MC);
5479     return nullptr;
5480   }
5481   case Intrinsic::memset: {
5482     const auto &MSI = cast<MemSetInst>(I);
5483     SDValue Op1 = getValue(I.getArgOperand(0));
5484     SDValue Op2 = getValue(I.getArgOperand(1));
5485     SDValue Op3 = getValue(I.getArgOperand(2));
5486     // @llvm.memset defines 0 and 1 to both mean no alignment.
5487     unsigned Align = std::max<unsigned>(MSI.getDestAlignment(), 1);
5488     bool isVol = MSI.isVolatile();
5489     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5490     SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5491                                isTC, MachinePointerInfo(I.getArgOperand(0)));
5492     updateDAGForMaybeTailCall(MS);
5493     return nullptr;
5494   }
5495   case Intrinsic::memmove: {
5496     const auto &MMI = cast<MemMoveInst>(I);
5497     SDValue Op1 = getValue(I.getArgOperand(0));
5498     SDValue Op2 = getValue(I.getArgOperand(1));
5499     SDValue Op3 = getValue(I.getArgOperand(2));
5500     // @llvm.memmove defines 0 and 1 to both mean no alignment.
5501     unsigned DstAlign = std::max<unsigned>(MMI.getDestAlignment(), 1);
5502     unsigned SrcAlign = std::max<unsigned>(MMI.getSourceAlignment(), 1);
5503     unsigned Align = MinAlign(DstAlign, SrcAlign);
5504     bool isVol = MMI.isVolatile();
5505     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5506     // FIXME: Support passing different dest/src alignments to the memmove DAG
5507     // node.
5508     SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5509                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
5510                                 MachinePointerInfo(I.getArgOperand(1)));
5511     updateDAGForMaybeTailCall(MM);
5512     return nullptr;
5513   }
5514   case Intrinsic::memcpy_element_unordered_atomic: {
5515     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
5516     SDValue Dst = getValue(MI.getRawDest());
5517     SDValue Src = getValue(MI.getRawSource());
5518     SDValue Length = getValue(MI.getLength());
5519 
5520     unsigned DstAlign = MI.getDestAlignment();
5521     unsigned SrcAlign = MI.getSourceAlignment();
5522     Type *LengthTy = MI.getLength()->getType();
5523     unsigned ElemSz = MI.getElementSizeInBytes();
5524     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5525     SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
5526                                      SrcAlign, Length, LengthTy, ElemSz, isTC,
5527                                      MachinePointerInfo(MI.getRawDest()),
5528                                      MachinePointerInfo(MI.getRawSource()));
5529     updateDAGForMaybeTailCall(MC);
5530     return nullptr;
5531   }
5532   case Intrinsic::memmove_element_unordered_atomic: {
5533     auto &MI = cast<AtomicMemMoveInst>(I);
5534     SDValue Dst = getValue(MI.getRawDest());
5535     SDValue Src = getValue(MI.getRawSource());
5536     SDValue Length = getValue(MI.getLength());
5537 
5538     unsigned DstAlign = MI.getDestAlignment();
5539     unsigned SrcAlign = MI.getSourceAlignment();
5540     Type *LengthTy = MI.getLength()->getType();
5541     unsigned ElemSz = MI.getElementSizeInBytes();
5542     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5543     SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
5544                                       SrcAlign, Length, LengthTy, ElemSz, isTC,
5545                                       MachinePointerInfo(MI.getRawDest()),
5546                                       MachinePointerInfo(MI.getRawSource()));
5547     updateDAGForMaybeTailCall(MC);
5548     return nullptr;
5549   }
5550   case Intrinsic::memset_element_unordered_atomic: {
5551     auto &MI = cast<AtomicMemSetInst>(I);
5552     SDValue Dst = getValue(MI.getRawDest());
5553     SDValue Val = getValue(MI.getValue());
5554     SDValue Length = getValue(MI.getLength());
5555 
5556     unsigned DstAlign = MI.getDestAlignment();
5557     Type *LengthTy = MI.getLength()->getType();
5558     unsigned ElemSz = MI.getElementSizeInBytes();
5559     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5560     SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
5561                                      LengthTy, ElemSz, isTC,
5562                                      MachinePointerInfo(MI.getRawDest()));
5563     updateDAGForMaybeTailCall(MC);
5564     return nullptr;
5565   }
5566   case Intrinsic::dbg_addr:
5567   case Intrinsic::dbg_declare: {
5568     const auto &DI = cast<DbgVariableIntrinsic>(I);
5569     DILocalVariable *Variable = DI.getVariable();
5570     DIExpression *Expression = DI.getExpression();
5571     dropDanglingDebugInfo(Variable, Expression);
5572     assert(Variable && "Missing variable");
5573 
5574     // Check if address has undef value.
5575     const Value *Address = DI.getVariableLocation();
5576     if (!Address || isa<UndefValue>(Address) ||
5577         (Address->use_empty() && !isa<Argument>(Address))) {
5578       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5579       return nullptr;
5580     }
5581 
5582     bool isParameter = Variable->isParameter() || isa<Argument>(Address);
5583 
5584     // Check if this variable can be described by a frame index, typically
5585     // either as a static alloca or a byval parameter.
5586     int FI = std::numeric_limits<int>::max();
5587     if (const auto *AI =
5588             dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
5589       if (AI->isStaticAlloca()) {
5590         auto I = FuncInfo.StaticAllocaMap.find(AI);
5591         if (I != FuncInfo.StaticAllocaMap.end())
5592           FI = I->second;
5593       }
5594     } else if (const auto *Arg = dyn_cast<Argument>(
5595                    Address->stripInBoundsConstantOffsets())) {
5596       FI = FuncInfo.getArgumentFrameIndex(Arg);
5597     }
5598 
5599     // llvm.dbg.addr is control dependent and always generates indirect
5600     // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
5601     // the MachineFunction variable table.
5602     if (FI != std::numeric_limits<int>::max()) {
5603       if (Intrinsic == Intrinsic::dbg_addr) {
5604         SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
5605             Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
5606         DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
5607       }
5608       return nullptr;
5609     }
5610 
5611     SDValue &N = NodeMap[Address];
5612     if (!N.getNode() && isa<Argument>(Address))
5613       // Check unused arguments map.
5614       N = UnusedArgNodeMap[Address];
5615     SDDbgValue *SDV;
5616     if (N.getNode()) {
5617       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
5618         Address = BCI->getOperand(0);
5619       // Parameters are handled specially.
5620       auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
5621       if (isParameter && FINode) {
5622         // Byval parameter. We have a frame index at this point.
5623         SDV =
5624             DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
5625                                       /*IsIndirect*/ true, dl, SDNodeOrder);
5626       } else if (isa<Argument>(Address)) {
5627         // Address is an argument, so try to emit its dbg value using
5628         // virtual register info from the FuncInfo.ValueMap.
5629         EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
5630         return nullptr;
5631       } else {
5632         SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
5633                               true, dl, SDNodeOrder);
5634       }
5635       DAG.AddDbgValue(SDV, N.getNode(), isParameter);
5636     } else {
5637       // If Address is an argument then try to emit its dbg value using
5638       // virtual register info from the FuncInfo.ValueMap.
5639       if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
5640                                     N)) {
5641         LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5642       }
5643     }
5644     return nullptr;
5645   }
5646   case Intrinsic::dbg_label: {
5647     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
5648     DILabel *Label = DI.getLabel();
5649     assert(Label && "Missing label");
5650 
5651     SDDbgLabel *SDV;
5652     SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
5653     DAG.AddDbgLabel(SDV);
5654     return nullptr;
5655   }
5656   case Intrinsic::dbg_value: {
5657     const DbgValueInst &DI = cast<DbgValueInst>(I);
5658     assert(DI.getVariable() && "Missing variable");
5659 
5660     DILocalVariable *Variable = DI.getVariable();
5661     DIExpression *Expression = DI.getExpression();
5662     dropDanglingDebugInfo(Variable, Expression);
5663     const Value *V = DI.getValue();
5664     if (!V)
5665       return nullptr;
5666 
5667     if (handleDebugValue(V, Variable, Expression, dl, DI.getDebugLoc(),
5668         SDNodeOrder))
5669       return nullptr;
5670 
5671     // TODO: Dangling debug info will eventually either be resolved or produce
5672     // an Undef DBG_VALUE. However in the resolution case, a gap may appear
5673     // between the original dbg.value location and its resolved DBG_VALUE, which
5674     // we should ideally fill with an extra Undef DBG_VALUE.
5675 
5676     DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
5677     return nullptr;
5678   }
5679 
5680   case Intrinsic::eh_typeid_for: {
5681     // Find the type id for the given typeinfo.
5682     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5683     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5684     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5685     setValue(&I, Res);
5686     return nullptr;
5687   }
5688 
5689   case Intrinsic::eh_return_i32:
5690   case Intrinsic::eh_return_i64:
5691     DAG.getMachineFunction().setCallsEHReturn(true);
5692     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5693                             MVT::Other,
5694                             getControlRoot(),
5695                             getValue(I.getArgOperand(0)),
5696                             getValue(I.getArgOperand(1))));
5697     return nullptr;
5698   case Intrinsic::eh_unwind_init:
5699     DAG.getMachineFunction().setCallsUnwindInit(true);
5700     return nullptr;
5701   case Intrinsic::eh_dwarf_cfa:
5702     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
5703                              TLI.getPointerTy(DAG.getDataLayout()),
5704                              getValue(I.getArgOperand(0))));
5705     return nullptr;
5706   case Intrinsic::eh_sjlj_callsite: {
5707     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5708     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
5709     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
5710     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
5711 
5712     MMI.setCurrentCallSite(CI->getZExtValue());
5713     return nullptr;
5714   }
5715   case Intrinsic::eh_sjlj_functioncontext: {
5716     // Get and store the index of the function context.
5717     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
5718     AllocaInst *FnCtx =
5719       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
5720     int FI = FuncInfo.StaticAllocaMap[FnCtx];
5721     MFI.setFunctionContextIndex(FI);
5722     return nullptr;
5723   }
5724   case Intrinsic::eh_sjlj_setjmp: {
5725     SDValue Ops[2];
5726     Ops[0] = getRoot();
5727     Ops[1] = getValue(I.getArgOperand(0));
5728     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
5729                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
5730     setValue(&I, Op.getValue(0));
5731     DAG.setRoot(Op.getValue(1));
5732     return nullptr;
5733   }
5734   case Intrinsic::eh_sjlj_longjmp:
5735     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
5736                             getRoot(), getValue(I.getArgOperand(0))));
5737     return nullptr;
5738   case Intrinsic::eh_sjlj_setup_dispatch:
5739     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
5740                             getRoot()));
5741     return nullptr;
5742   case Intrinsic::masked_gather:
5743     visitMaskedGather(I);
5744     return nullptr;
5745   case Intrinsic::masked_load:
5746     visitMaskedLoad(I);
5747     return nullptr;
5748   case Intrinsic::masked_scatter:
5749     visitMaskedScatter(I);
5750     return nullptr;
5751   case Intrinsic::masked_store:
5752     visitMaskedStore(I);
5753     return nullptr;
5754   case Intrinsic::masked_expandload:
5755     visitMaskedLoad(I, true /* IsExpanding */);
5756     return nullptr;
5757   case Intrinsic::masked_compressstore:
5758     visitMaskedStore(I, true /* IsCompressing */);
5759     return nullptr;
5760   case Intrinsic::x86_mmx_pslli_w:
5761   case Intrinsic::x86_mmx_pslli_d:
5762   case Intrinsic::x86_mmx_pslli_q:
5763   case Intrinsic::x86_mmx_psrli_w:
5764   case Intrinsic::x86_mmx_psrli_d:
5765   case Intrinsic::x86_mmx_psrli_q:
5766   case Intrinsic::x86_mmx_psrai_w:
5767   case Intrinsic::x86_mmx_psrai_d: {
5768     SDValue ShAmt = getValue(I.getArgOperand(1));
5769     if (isa<ConstantSDNode>(ShAmt)) {
5770       visitTargetIntrinsic(I, Intrinsic);
5771       return nullptr;
5772     }
5773     unsigned NewIntrinsic = 0;
5774     EVT ShAmtVT = MVT::v2i32;
5775     switch (Intrinsic) {
5776     case Intrinsic::x86_mmx_pslli_w:
5777       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
5778       break;
5779     case Intrinsic::x86_mmx_pslli_d:
5780       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
5781       break;
5782     case Intrinsic::x86_mmx_pslli_q:
5783       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
5784       break;
5785     case Intrinsic::x86_mmx_psrli_w:
5786       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
5787       break;
5788     case Intrinsic::x86_mmx_psrli_d:
5789       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
5790       break;
5791     case Intrinsic::x86_mmx_psrli_q:
5792       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
5793       break;
5794     case Intrinsic::x86_mmx_psrai_w:
5795       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
5796       break;
5797     case Intrinsic::x86_mmx_psrai_d:
5798       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
5799       break;
5800     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5801     }
5802 
5803     // The vector shift intrinsics with scalar shift amounts use 32-bit values,
5804     // but the SSE2/MMX shift instructions read 64 bits. Set the upper 32 bits
5805     // to zero.
5806     // We must do this early because v2i32 is not a legal type.
5807     SDValue ShOps[2];
5808     ShOps[0] = ShAmt;
5809     ShOps[1] = DAG.getConstant(0, sdl, MVT::i32);
5810     ShAmt = DAG.getBuildVector(ShAmtVT, sdl, ShOps);
5811     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5812     ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5813     Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5814                        DAG.getConstant(NewIntrinsic, sdl, MVT::i32),
5815                        getValue(I.getArgOperand(0)), ShAmt);
5816     setValue(&I, Res);
5817     return nullptr;
5818   }
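  // Sketch of the rewrite above for a non-constant shift amount: a call like
  //
  //   %r = call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %v, i32 %amt)
  //
  // becomes the two-operand form @llvm.x86.mmx.psll.w, with the shift amount
  // widened to 64 bits as bitcast(build_vector(%amt, 0)) so that the
  // hardware's 64-bit shift-count read sees zeros in the upper half.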
5819   case Intrinsic::powi:
5820     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
5821                             getValue(I.getArgOperand(1)), DAG));
5822     return nullptr;
5823   case Intrinsic::log:
5824     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5825     return nullptr;
5826   case Intrinsic::log2:
5827     setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5828     return nullptr;
5829   case Intrinsic::log10:
5830     setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5831     return nullptr;
5832   case Intrinsic::exp:
5833     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5834     return nullptr;
5835   case Intrinsic::exp2:
5836     setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5837     return nullptr;
5838   case Intrinsic::pow:
5839     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
5840                            getValue(I.getArgOperand(1)), DAG, TLI));
5841     return nullptr;
5842   case Intrinsic::sqrt:
5843   case Intrinsic::fabs:
5844   case Intrinsic::sin:
5845   case Intrinsic::cos:
5846   case Intrinsic::floor:
5847   case Intrinsic::ceil:
5848   case Intrinsic::trunc:
5849   case Intrinsic::rint:
5850   case Intrinsic::nearbyint:
5851   case Intrinsic::round:
5852   case Intrinsic::canonicalize: {
5853     unsigned Opcode;
5854     switch (Intrinsic) {
5855     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5856     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
5857     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
5858     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
5859     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
5860     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
5861     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
5862     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
5863     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
5864     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
5865     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
5866     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
5867     }
5868 
5869     setValue(&I, DAG.getNode(Opcode, sdl,
5870                              getValue(I.getArgOperand(0)).getValueType(),
5871                              getValue(I.getArgOperand(0))));
5872     return nullptr;
5873   }
5874   case Intrinsic::minnum: {
5875     auto VT = getValue(I.getArgOperand(0)).getValueType();
5876     unsigned Opc =
5877         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT)
5878             ? ISD::FMINIMUM
5879             : ISD::FMINNUM;
5880     setValue(&I, DAG.getNode(Opc, sdl, VT,
5881                              getValue(I.getArgOperand(0)),
5882                              getValue(I.getArgOperand(1))));
5883     return nullptr;
5884   }
5885   case Intrinsic::maxnum: {
5886     auto VT = getValue(I.getArgOperand(0)).getValueType();
5887     unsigned Opc =
5888         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT)
5889             ? ISD::FMAXIMUM
5890             : ISD::FMAXNUM;
5891     setValue(&I, DAG.getNode(Opc, sdl, VT,
5892                              getValue(I.getArgOperand(0)),
5893                              getValue(I.getArgOperand(1))));
5894     return nullptr;
5895   }
5896   case Intrinsic::minimum:
5897     setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
5898                              getValue(I.getArgOperand(0)).getValueType(),
5899                              getValue(I.getArgOperand(0)),
5900                              getValue(I.getArgOperand(1))));
5901     return nullptr;
5902   case Intrinsic::maximum:
5903     setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
5904                              getValue(I.getArgOperand(0)).getValueType(),
5905                              getValue(I.getArgOperand(0)),
5906                              getValue(I.getArgOperand(1))));
5907     return nullptr;
5908   case Intrinsic::copysign:
5909     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
5910                              getValue(I.getArgOperand(0)).getValueType(),
5911                              getValue(I.getArgOperand(0)),
5912                              getValue(I.getArgOperand(1))));
5913     return nullptr;
5914   case Intrinsic::fma:
5915     setValue(&I, DAG.getNode(ISD::FMA, sdl,
5916                              getValue(I.getArgOperand(0)).getValueType(),
5917                              getValue(I.getArgOperand(0)),
5918                              getValue(I.getArgOperand(1)),
5919                              getValue(I.getArgOperand(2))));
5920     return nullptr;
5921   case Intrinsic::experimental_constrained_fadd:
5922   case Intrinsic::experimental_constrained_fsub:
5923   case Intrinsic::experimental_constrained_fmul:
5924   case Intrinsic::experimental_constrained_fdiv:
5925   case Intrinsic::experimental_constrained_frem:
5926   case Intrinsic::experimental_constrained_fma:
5927   case Intrinsic::experimental_constrained_sqrt:
5928   case Intrinsic::experimental_constrained_pow:
5929   case Intrinsic::experimental_constrained_powi:
5930   case Intrinsic::experimental_constrained_sin:
5931   case Intrinsic::experimental_constrained_cos:
5932   case Intrinsic::experimental_constrained_exp:
5933   case Intrinsic::experimental_constrained_exp2:
5934   case Intrinsic::experimental_constrained_log:
5935   case Intrinsic::experimental_constrained_log10:
5936   case Intrinsic::experimental_constrained_log2:
5937   case Intrinsic::experimental_constrained_rint:
5938   case Intrinsic::experimental_constrained_nearbyint:
5939   case Intrinsic::experimental_constrained_maxnum:
5940   case Intrinsic::experimental_constrained_minnum:
5941   case Intrinsic::experimental_constrained_ceil:
5942   case Intrinsic::experimental_constrained_floor:
5943   case Intrinsic::experimental_constrained_round:
5944   case Intrinsic::experimental_constrained_trunc:
5945     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
5946     return nullptr;
5947   case Intrinsic::fmuladd: {
5948     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5949     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
5950         TLI.isFMAFasterThanFMulAndFAdd(VT)) {
5951       setValue(&I, DAG.getNode(ISD::FMA, sdl,
5952                                getValue(I.getArgOperand(0)).getValueType(),
5953                                getValue(I.getArgOperand(0)),
5954                                getValue(I.getArgOperand(1)),
5955                                getValue(I.getArgOperand(2))));
5956     } else {
5957       // TODO: Intrinsic calls should have fast-math-flags.
5958       SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
5959                                 getValue(I.getArgOperand(0)).getValueType(),
5960                                 getValue(I.getArgOperand(0)),
5961                                 getValue(I.getArgOperand(1)));
5962       SDValue Add = DAG.getNode(ISD::FADD, sdl,
5963                                 getValue(I.getArgOperand(0)).getValueType(),
5964                                 Mul,
5965                                 getValue(I.getArgOperand(2)));
5966       setValue(&I, Add);
5967     }
5968     return nullptr;
5969   }
5970   case Intrinsic::convert_to_fp16:
5971     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
5972                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
5973                                          getValue(I.getArgOperand(0)),
5974                                          DAG.getTargetConstant(0, sdl,
5975                                                                MVT::i32))));
5976     return nullptr;
5977   case Intrinsic::convert_from_fp16:
5978     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
5979                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
5980                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
5981                                          getValue(I.getArgOperand(0)))));
5982     return nullptr;
5983   case Intrinsic::pcmarker: {
5984     SDValue Tmp = getValue(I.getArgOperand(0));
5985     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
5986     return nullptr;
5987   }
5988   case Intrinsic::readcyclecounter: {
5989     SDValue Op = getRoot();
5990     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
5991                       DAG.getVTList(MVT::i64, MVT::Other), Op);
5992     setValue(&I, Res);
5993     DAG.setRoot(Res.getValue(1));
5994     return nullptr;
5995   }
5996   case Intrinsic::bitreverse:
5997     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
5998                              getValue(I.getArgOperand(0)).getValueType(),
5999                              getValue(I.getArgOperand(0))));
6000     return nullptr;
6001   case Intrinsic::bswap:
6002     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6003                              getValue(I.getArgOperand(0)).getValueType(),
6004                              getValue(I.getArgOperand(0))));
6005     return nullptr;
6006   case Intrinsic::cttz: {
6007     SDValue Arg = getValue(I.getArgOperand(0));
6008     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6009     EVT Ty = Arg.getValueType();
6010     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6011                              sdl, Ty, Arg));
6012     return nullptr;
6013   }
6014   case Intrinsic::ctlz: {
6015     SDValue Arg = getValue(I.getArgOperand(0));
6016     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6017     EVT Ty = Arg.getValueType();
6018     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6019                              sdl, Ty, Arg));
6020     return nullptr;
6021   }
6022   case Intrinsic::ctpop: {
6023     SDValue Arg = getValue(I.getArgOperand(0));
6024     EVT Ty = Arg.getValueType();
6025     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6026     return nullptr;
6027   }
6028   case Intrinsic::fshl:
6029   case Intrinsic::fshr: {
6030     bool IsFSHL = Intrinsic == Intrinsic::fshl;
6031     SDValue X = getValue(I.getArgOperand(0));
6032     SDValue Y = getValue(I.getArgOperand(1));
6033     SDValue Z = getValue(I.getArgOperand(2));
6034     EVT VT = X.getValueType();
6035     SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT);
6036     SDValue Zero = DAG.getConstant(0, sdl, VT);
6037     SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC);
6038 
6039     auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6040     if (TLI.isOperationLegalOrCustom(FunnelOpcode, VT)) {
6041       setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6042       return nullptr;
6043     }
6044 
6045     // When X == Y, this is a rotate. If the data type has a power-of-2 size, we
6046     // avoid the select that is necessary in the general case to filter out
6047     // the 0-shift possibility that leads to UB.
6048     if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
6049       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6050       if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
6051         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6052         return nullptr;
6053       }
6054 
6055       // Some targets only rotate one way. Try the opposite direction.
6056       RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
6057       if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
6058         // Negate the shift amount because it is safe to ignore the high bits.
6059         SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
6060         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));
6061         return nullptr;
6062       }
6063 
6064       // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW))
6065       // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW))
6066       SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
6067       SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC);
6068       SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt);
6069       SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? NShAmt : ShAmt);
6070       setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY));
6071       return nullptr;
6072     }
6073 
6074     // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
6075     // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
6076     SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt);
6077     SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt);
6078     SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt);
6079     SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY);
6080 
6081     // If (Z % BW == 0), then the opposite-direction shift is a shift by the
6082     // full bit width, which is undefined. We must compare and select to avoid UB.
6083     EVT CCVT = MVT::i1;
6084     if (VT.isVector())
6085       CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements());
6086 
6087     // For fshl, 0-shift returns the 1st arg (X).
6088     // For fshr, 0-shift returns the 2nd arg (Y).
6089     SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ);
6090     setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or));
6091     return nullptr;
6092   }
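  // Worked example of the generic expansion above, assuming i8 operands:
  //
  //   fshl(i8 %x, i8 %y, 3) -> (%x << 3) | (%y >> 5)   // ShAmt = 3 % 8 = 3
  //   fshr(i8 %x, i8 %y, 3) -> (%x << 5) | (%y >> 3)
  //
  // When the urem'd shift amount is 0, the select returns %x for fshl and
  // %y for fshr, avoiding the undefined opposite-direction shift by 8.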
6093   case Intrinsic::sadd_sat: {
6094     SDValue Op1 = getValue(I.getArgOperand(0));
6095     SDValue Op2 = getValue(I.getArgOperand(1));
6096     setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6097     return nullptr;
6098   }
6099   case Intrinsic::uadd_sat: {
6100     SDValue Op1 = getValue(I.getArgOperand(0));
6101     SDValue Op2 = getValue(I.getArgOperand(1));
6102     setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6103     return nullptr;
6104   }
6105   case Intrinsic::ssub_sat: {
6106     SDValue Op1 = getValue(I.getArgOperand(0));
6107     SDValue Op2 = getValue(I.getArgOperand(1));
6108     setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6109     return nullptr;
6110   }
6111   case Intrinsic::usub_sat: {
6112     SDValue Op1 = getValue(I.getArgOperand(0));
6113     SDValue Op2 = getValue(I.getArgOperand(1));
6114     setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6115     return nullptr;
6116   }
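  // Worked examples for the saturating cases above, assuming i8 operands:
  //
  //   uadd.sat(200, 100) -> 255   // clamped to UINT8_MAX, not wrapped to 44
  //   sadd.sat(100, 100) -> 127   // clamped to INT8_MAX, not wrapped to -56
  //   usub.sat(10, 20)   -> 0     // clamped, not wrapped to 246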
6117   case Intrinsic::smul_fix:
6118   case Intrinsic::umul_fix: {
6119     SDValue Op1 = getValue(I.getArgOperand(0));
6120     SDValue Op2 = getValue(I.getArgOperand(1));
6121     SDValue Op3 = getValue(I.getArgOperand(2));
6122     setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6123                              Op1.getValueType(), Op1, Op2, Op3));
6124     return nullptr;
6125   }
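  // Worked example of the fixed-point multiply above: with a scale of 16
  // (Q16.16 operands), the intrinsic forms the double-width product and
  // shifts it right by the scale:
  //
  //   smul.fix(3 << 16, 2 << 16, 16) -> ((3 << 16) * (2 << 16)) >> 16
  //                                  == 6 << 16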
6126   case Intrinsic::stacksave: {
6127     SDValue Op = getRoot();
6128     Res = DAG.getNode(
6129         ISD::STACKSAVE, sdl,
6130         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
6131     setValue(&I, Res);
6132     DAG.setRoot(Res.getValue(1));
6133     return nullptr;
6134   }
6135   case Intrinsic::stackrestore:
6136     Res = getValue(I.getArgOperand(0));
6137     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6138     return nullptr;
6139   case Intrinsic::get_dynamic_area_offset: {
6140     SDValue Op = getRoot();
6141     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
6142     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6143     // The result type of @llvm.get.dynamic.area.offset must match the
6144     // target's pointer type.
6145     if (PtrTy != ResTy)
6146       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6147                          " intrinsic!");
6148     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6149                       Op);
6150     DAG.setRoot(Op);
6151     setValue(&I, Res);
6152     return nullptr;
6153   }
6154   case Intrinsic::stackguard: {
6155     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
6156     MachineFunction &MF = DAG.getMachineFunction();
6157     const Module &M = *MF.getFunction().getParent();
6158     SDValue Chain = getRoot();
6159     if (TLI.useLoadStackGuardNode()) {
6160       Res = getLoadStackGuard(DAG, sdl, Chain);
6161     } else {
6162       const Value *Global = TLI.getSDagStackGuard(M);
6163       unsigned Align = DL->getPrefTypeAlignment(Global->getType());
6164       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6165                         MachinePointerInfo(Global, 0), Align,
6166                         MachineMemOperand::MOVolatile);
6167     }
6168     if (TLI.useStackGuardXorFP())
6169       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6170     DAG.setRoot(Chain);
6171     setValue(&I, Res);
6172     return nullptr;
6173   }
6174   case Intrinsic::stackprotector: {
6175     // Emit code into the DAG to store the stack guard onto the stack.
6176     MachineFunction &MF = DAG.getMachineFunction();
6177     MachineFrameInfo &MFI = MF.getFrameInfo();
6178     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
6179     SDValue Src, Chain = getRoot();
6180 
6181     if (TLI.useLoadStackGuardNode())
6182       Src = getLoadStackGuard(DAG, sdl, Chain);
6183     else
6184       Src = getValue(I.getArgOperand(0));   // The guard's value.
6185 
6186     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
6187 
6188     int FI = FuncInfo.StaticAllocaMap[Slot];
6189     MFI.setStackProtectorIndex(FI);
6190 
6191     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
6192 
6193     // Store the stack protector onto the stack.
6194     Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
6195                                                  DAG.getMachineFunction(), FI),
6196                        /* Alignment = */ 0, MachineMemOperand::MOVolatile);
6197     setValue(&I, Res);
6198     DAG.setRoot(Res);
6199     return nullptr;
6200   }
6201   case Intrinsic::objectsize: {
6202     // If we don't know by now, we're never going to know.
6203     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
6204 
6205     assert(CI && "Non-constant type in __builtin_object_size?");
6206 
6207     SDValue Arg = getValue(I.getCalledValue());
6208     EVT Ty = Arg.getValueType();
6209 
6210     if (CI->isZero())
6211       Res = DAG.getConstant(-1ULL, sdl, Ty);
6212     else
6213       Res = DAG.getConstant(0, sdl, Ty);
6214 
6215     setValue(&I, Res);
6216     return nullptr;
6217   }
6218 
6219   case Intrinsic::is_constant:
6220     // If this wasn't constant-folded away by now, then it's not a
6221     // constant.
6222     setValue(&I, DAG.getConstant(0, sdl, MVT::i1));
6223     return nullptr;
6224 
6225   case Intrinsic::annotation:
6226   case Intrinsic::ptr_annotation:
6227   case Intrinsic::launder_invariant_group:
6228   case Intrinsic::strip_invariant_group:
6229     // Drop the intrinsic, but forward the value
6230     setValue(&I, getValue(I.getOperand(0)));
6231     return nullptr;
6232   case Intrinsic::assume:
6233   case Intrinsic::var_annotation:
6234   case Intrinsic::sideeffect:
6235     // Discard annotate attributes, assumptions, and artificial side-effects.
6236     return nullptr;
6237 
6238   case Intrinsic::codeview_annotation: {
6239     // Emit a label associated with this metadata.
6240     MachineFunction &MF = DAG.getMachineFunction();
6241     MCSymbol *Label =
6242         MF.getMMI().getContext().createTempSymbol("annotation", true);
6243     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
6244     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
6245     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
6246     DAG.setRoot(Res);
6247     return nullptr;
6248   }
6249 
6250   case Intrinsic::init_trampoline: {
6251     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
6252 
6253     SDValue Ops[6];
6254     Ops[0] = getRoot();
6255     Ops[1] = getValue(I.getArgOperand(0));
6256     Ops[2] = getValue(I.getArgOperand(1));
6257     Ops[3] = getValue(I.getArgOperand(2));
6258     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
6259     Ops[5] = DAG.getSrcValue(F);
6260 
6261     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
6262 
6263     DAG.setRoot(Res);
6264     return nullptr;
6265   }
6266   case Intrinsic::adjust_trampoline:
6267     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
6268                              TLI.getPointerTy(DAG.getDataLayout()),
6269                              getValue(I.getArgOperand(0))));
6270     return nullptr;
6271   case Intrinsic::gcroot: {
6272     assert(DAG.getMachineFunction().getFunction().hasGC() &&
6273            "only valid in functions with gc specified, enforced by Verifier");
6274     assert(GFI && "implied by previous");
6275     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
6276     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
6277 
6278     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
6279     GFI->addStackRoot(FI->getIndex(), TypeMap);
6280     return nullptr;
6281   }
6282   case Intrinsic::gcread:
6283   case Intrinsic::gcwrite:
6284     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
6285   case Intrinsic::flt_rounds:
6286     setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
6287     return nullptr;
6288 
6289   case Intrinsic::expect:
6290     // Just replace __builtin_expect(exp, c) with EXP.
6291     setValue(&I, getValue(I.getArgOperand(0)));
6292     return nullptr;
6293 
6294   case Intrinsic::debugtrap:
6295   case Intrinsic::trap: {
6296     StringRef TrapFuncName =
6297         I.getAttributes()
6298             .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
6299             .getValueAsString();
6300     if (TrapFuncName.empty()) {
6301       ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
6302         ISD::TRAP : ISD::DEBUGTRAP;
6303       DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
6304       return nullptr;
6305     }
6306     TargetLowering::ArgListTy Args;
6307 
6308     TargetLowering::CallLoweringInfo CLI(DAG);
6309     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
6310         CallingConv::C, I.getType(),
6311         DAG.getExternalSymbol(TrapFuncName.data(),
6312                               TLI.getPointerTy(DAG.getDataLayout())),
6313         std::move(Args));
6314 
6315     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
6316     DAG.setRoot(Result.second);
6317     return nullptr;
6318   }
6319 
6320   case Intrinsic::uadd_with_overflow:
6321   case Intrinsic::sadd_with_overflow:
6322   case Intrinsic::usub_with_overflow:
6323   case Intrinsic::ssub_with_overflow:
6324   case Intrinsic::umul_with_overflow:
6325   case Intrinsic::smul_with_overflow: {
6326     ISD::NodeType Op;
6327     switch (Intrinsic) {
6328     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6329     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
6330     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
6331     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
6332     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
6333     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
6334     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
6335     }
6336     SDValue Op1 = getValue(I.getArgOperand(0));
6337     SDValue Op2 = getValue(I.getArgOperand(1));
6338 
6339     EVT ResultVT = Op1.getValueType();
6340     EVT OverflowVT = MVT::i1;
6341     if (ResultVT.isVector())
6342       OverflowVT = EVT::getVectorVT(
6343           *Context, OverflowVT, ResultVT.getVectorNumElements());
6344 
6345     SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
6346     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
6347     return nullptr;
6348   }
6349   case Intrinsic::prefetch: {
6350     SDValue Ops[5];
6351     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
6352     auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
6353     Ops[0] = DAG.getRoot();
6354     Ops[1] = getValue(I.getArgOperand(0));
6355     Ops[2] = getValue(I.getArgOperand(1));
6356     Ops[3] = getValue(I.getArgOperand(2));
6357     Ops[4] = getValue(I.getArgOperand(3));
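         // e.g. @llvm.prefetch(i8* %p, i32 0 /*read*/, i32 3 /*locality*/,
         // i32 1 /*data cache*/) fills operands 1-4 here; operand 0 is the chain.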
6358     SDValue Result = DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
6359                                              DAG.getVTList(MVT::Other), Ops,
6360                                              EVT::getIntegerVT(*Context, 8),
6361                                              MachinePointerInfo(I.getArgOperand(0)),
6362                                              0, /* align */
6363                                              Flags);
6364 
6365     // Chain the prefetch in parallel with any pending loads, to stay out of
6366     // the way of later optimizations.
6367     PendingLoads.push_back(Result);
6368     Result = getRoot();
6369     DAG.setRoot(Result);
6370     return nullptr;
6371   }
6372   case Intrinsic::lifetime_start:
6373   case Intrinsic::lifetime_end: {
6374     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
6375     // Stack coloring is not enabled in O0, so discard the region information.
6376     if (TM.getOptLevel() == CodeGenOpt::None)
6377       return nullptr;
6378 
6379     const int64_t ObjectSize =
6380         cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
6381     Value *const ObjectPtr = I.getArgOperand(1);
6382     SmallVector<Value *, 4> Allocas;
6383     GetUnderlyingObjects(ObjectPtr, Allocas, *DL);
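         // e.g. for call void @llvm.lifetime.start.p0i8(i64 16, i8* %p),
         // ObjectSize is 16 and Allocas receives the underlying alloca(s) of %p.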
6384 
6385     for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
6386            E = Allocas.end(); Object != E; ++Object) {
6387       AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
6388 
6389       // Could not find an Alloca.
6390       if (!LifetimeObject)
6391         continue;
6392 
6393       // First check that the Alloca is static, otherwise it won't have a
6394       // valid frame index.
6395       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
6396       if (SI == FuncInfo.StaticAllocaMap.end())
6397         return nullptr;
6398 
6399       const int FrameIndex = SI->second;
6400       int64_t Offset;
6401       if (GetPointerBaseWithConstantOffset(
6402               ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
6403         Offset = -1; // Cannot determine offset from alloca to lifetime object.
6404       Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
6405                                 Offset);
6406       DAG.setRoot(Res);
6407     }
6408     return nullptr;
6409   }
6410   case Intrinsic::invariant_start:
6411     // Discard region information.
6412     setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
6413     return nullptr;
6414   case Intrinsic::invariant_end:
6415     // Discard region information.
6416     return nullptr;
6417   case Intrinsic::clear_cache:
6418     return TLI.getClearCacheBuiltinName();
6419   case Intrinsic::donothing:
6420     // ignore
6421     return nullptr;
6422   case Intrinsic::experimental_stackmap:
6423     visitStackmap(I);
6424     return nullptr;
6425   case Intrinsic::experimental_patchpoint_void:
6426   case Intrinsic::experimental_patchpoint_i64:
6427     visitPatchpoint(&I);
6428     return nullptr;
6429   case Intrinsic::experimental_gc_statepoint:
6430     LowerStatepoint(ImmutableStatepoint(&I));
6431     return nullptr;
6432   case Intrinsic::experimental_gc_result:
6433     visitGCResult(cast<GCResultInst>(I));
6434     return nullptr;
6435   case Intrinsic::experimental_gc_relocate:
6436     visitGCRelocate(cast<GCRelocateInst>(I));
6437     return nullptr;
6438   case Intrinsic::instrprof_increment:
6439     llvm_unreachable("instrprof failed to lower an increment");
6440   case Intrinsic::instrprof_value_profile:
6441     llvm_unreachable("instrprof failed to lower a value profiling call");
6442   case Intrinsic::localescape: {
6443     MachineFunction &MF = DAG.getMachineFunction();
6444     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6445 
6446     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
6447     // is the same on all targets.
6448     for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
6449       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
6450       if (isa<ConstantPointerNull>(Arg))
6451         continue; // Skip null pointers. They represent a hole in index space.
6452       AllocaInst *Slot = cast<AllocaInst>(Arg);
6453       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
6454              "can only escape static allocas");
6455       int FI = FuncInfo.StaticAllocaMap[Slot];
6456       MCSymbol *FrameAllocSym =
6457           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6458               GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
6459       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
6460               TII->get(TargetOpcode::LOCAL_ESCAPE))
6461           .addSym(FrameAllocSym)
6462           .addFrameIndex(FI);
6463     }
6464 
6465     return nullptr;
6466   }
6467 
6468   case Intrinsic::localrecover: {
6469     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
6470     MachineFunction &MF = DAG.getMachineFunction();
6471     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0);
6472 
6473     // Get the symbol that defines the frame offset.
6474     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
6475     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
6476     unsigned IdxVal =
6477         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
6478     MCSymbol *FrameAllocSym =
6479         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6480             GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
6481 
6482     // Create an MCSymbol for the label to avoid any target lowering
6483     // that would make this PC-relative.
6484     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
6485     SDValue OffsetVal =
6486         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
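         // The symbol's value is the frame offset of the Idx'th escaped alloca,
         // which is only known once the parent function's frame has been laid
         // out, so it is materialized here as a symbolic constant.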
6487 
6488     // Add the offset to the FP.
6489     Value *FP = I.getArgOperand(1);
6490     SDValue FPVal = getValue(FP);
6491     SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal);
6492     setValue(&I, Add);
6493 
6494     return nullptr;
6495   }
6496 
6497   case Intrinsic::eh_exceptionpointer:
6498   case Intrinsic::eh_exceptioncode: {
6499     // Get the exception pointer vreg, copy from it, and resize it to fit.
6500     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
6501     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
6502     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
6503     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
6504     SDValue N =
6505         DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
6506     if (Intrinsic == Intrinsic::eh_exceptioncode)
6507       N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
6508     setValue(&I, N);
6509     return nullptr;
6510   }
6511   case Intrinsic::xray_customevent: {
6512     // Here we want to make sure that the intrinsic behaves as if it has a
6513     // specific calling convention; for now this only applies to x86_64 Linux.
6514     // FIXME: Support other platforms later.
6515     const auto &Triple = DAG.getTarget().getTargetTriple();
6516     if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6517       return nullptr;
6518 
6519     SDLoc DL = getCurSDLoc();
6520     SmallVector<SDValue, 8> Ops;
6521 
6522     // We always want the arguments to be passed in registers.
6523     SDValue LogEntryVal = getValue(I.getArgOperand(0));
6524     SDValue StrSizeVal = getValue(I.getArgOperand(1));
6525     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6526     SDValue Chain = getRoot();
6527     Ops.push_back(LogEntryVal);
6528     Ops.push_back(StrSizeVal);
6529     Ops.push_back(Chain);
6530 
6531     // We need to enforce the calling convention for the callsite so that
6532     // argument ordering is enforced correctly and so that register allocation
6533     // can see which registers may be clobbered and must be preserved across
6534     // calls to the intrinsic.
6535     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
6536                                            DL, NodeTys, Ops);
6537     SDValue patchableNode = SDValue(MN, 0);
6538     DAG.setRoot(patchableNode);
6539     setValue(&I, patchableNode);
6540     return nullptr;
6541   }
6542   case Intrinsic::xray_typedevent: {
6543     // Here we want to make sure that the intrinsic behaves as if it has a
6544     // specific calling convention; for now this only applies to x86_64 Linux.
6545     // FIXME: Support other platforms later.
6546     const auto &Triple = DAG.getTarget().getTargetTriple();
6547     if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6548       return nullptr;
6549 
6550     SDLoc DL = getCurSDLoc();
6551     SmallVector<SDValue, 8> Ops;
6552 
6553     // We always want the arguments to be passed in registers.
6554     // It's unclear to me how manipulating the selection DAG here forces callers
6555     // to provide arguments in registers instead of on the stack.
6556     SDValue LogTypeId = getValue(I.getArgOperand(0));
6557     SDValue LogEntryVal = getValue(I.getArgOperand(1));
6558     SDValue StrSizeVal = getValue(I.getArgOperand(2));
6559     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6560     SDValue Chain = getRoot();
6561     Ops.push_back(LogTypeId);
6562     Ops.push_back(LogEntryVal);
6563     Ops.push_back(StrSizeVal);
6564     Ops.push_back(Chain);
6565 
6566     // We need to enforce the calling convention for the callsite so that
6567     // argument ordering is enforced correctly and so that register allocation
6568     // can see which registers may be clobbered and must be preserved across
6569     // calls to the intrinsic.
6570     MachineSDNode *MN = DAG.getMachineNode(
6571         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
6572     SDValue patchableNode = SDValue(MN, 0);
6573     DAG.setRoot(patchableNode);
6574     setValue(&I, patchableNode);
6575     return nullptr;
6576   }
6577   case Intrinsic::experimental_deoptimize:
6578     LowerDeoptimizeCall(&I);
6579     return nullptr;
6580 
6581   case Intrinsic::experimental_vector_reduce_fadd:
6582   case Intrinsic::experimental_vector_reduce_fmul:
6583   case Intrinsic::experimental_vector_reduce_add:
6584   case Intrinsic::experimental_vector_reduce_mul:
6585   case Intrinsic::experimental_vector_reduce_and:
6586   case Intrinsic::experimental_vector_reduce_or:
6587   case Intrinsic::experimental_vector_reduce_xor:
6588   case Intrinsic::experimental_vector_reduce_smax:
6589   case Intrinsic::experimental_vector_reduce_smin:
6590   case Intrinsic::experimental_vector_reduce_umax:
6591   case Intrinsic::experimental_vector_reduce_umin:
6592   case Intrinsic::experimental_vector_reduce_fmax:
6593   case Intrinsic::experimental_vector_reduce_fmin:
6594     visitVectorReduce(I, Intrinsic);
6595     return nullptr;
6596 
6597   case Intrinsic::icall_branch_funnel: {
6598     SmallVector<SDValue, 16> Ops;
6599     Ops.push_back(DAG.getRoot());
6600     Ops.push_back(getValue(I.getArgOperand(0)));
6601 
6602     int64_t Offset;
6603     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6604         I.getArgOperand(1), Offset, DAG.getDataLayout()));
6605     if (!Base)
6606       report_fatal_error(
6607           "llvm.icall.branch.funnel operand must be a GlobalValue");
6608     Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0));
6609 
6610     struct BranchFunnelTarget {
6611       int64_t Offset;
6612       SDValue Target;
6613     };
6614     SmallVector<BranchFunnelTarget, 8> Targets;
6615 
6616     for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) {
6617       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6618           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
6619       if (ElemBase != Base)
6620         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
6621                            "to the same GlobalValue");
6622 
6623       SDValue Val = getValue(I.getArgOperand(Op + 1));
6624       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
6625       if (!GA)
6626         report_fatal_error(
6627             "llvm.icall.branch.funnel operand must be a GlobalValue");
6628       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
6629                                      GA->getGlobal(), getCurSDLoc(),
6630                                      Val.getValueType(), GA->getOffset())});
6631     }
6632     llvm::sort(Targets,
6633                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
6634                  return T1.Offset < T2.Offset;
6635                });
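         // Keeping the {offset, target} pairs sorted by offset allows the target
         // to expand ICALL_BRANCH_FUNNEL into, e.g., a comparison tree over the
         // callee address.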
6636 
6637     for (auto &T : Targets) {
6638       Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
6639       Ops.push_back(T.Target);
6640     }
6641 
6642     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL,
6643                                  getCurSDLoc(), MVT::Other, Ops),
6644               0);
6645     DAG.setRoot(N);
6646     setValue(&I, N);
6647     HasTailCall = true;
6648     return nullptr;
6649   }
6650 
6651   case Intrinsic::wasm_landingpad_index:
6652     // The information this intrinsic carried has been transferred to the
6653     // MachineFunction in SelectionDAGISel::PrepareEHLandingPad, so it can
6654     // safely be deleted now.
6655     return nullptr;
6656   }
6657 }
6658 
6659 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
6660     const ConstrainedFPIntrinsic &FPI) {
6661   SDLoc sdl = getCurSDLoc();
6662   unsigned Opcode;
6663   switch (FPI.getIntrinsicID()) {
6664   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6665   case Intrinsic::experimental_constrained_fadd:
6666     Opcode = ISD::STRICT_FADD;
6667     break;
6668   case Intrinsic::experimental_constrained_fsub:
6669     Opcode = ISD::STRICT_FSUB;
6670     break;
6671   case Intrinsic::experimental_constrained_fmul:
6672     Opcode = ISD::STRICT_FMUL;
6673     break;
6674   case Intrinsic::experimental_constrained_fdiv:
6675     Opcode = ISD::STRICT_FDIV;
6676     break;
6677   case Intrinsic::experimental_constrained_frem:
6678     Opcode = ISD::STRICT_FREM;
6679     break;
6680   case Intrinsic::experimental_constrained_fma:
6681     Opcode = ISD::STRICT_FMA;
6682     break;
6683   case Intrinsic::experimental_constrained_sqrt:
6684     Opcode = ISD::STRICT_FSQRT;
6685     break;
6686   case Intrinsic::experimental_constrained_pow:
6687     Opcode = ISD::STRICT_FPOW;
6688     break;
6689   case Intrinsic::experimental_constrained_powi:
6690     Opcode = ISD::STRICT_FPOWI;
6691     break;
6692   case Intrinsic::experimental_constrained_sin:
6693     Opcode = ISD::STRICT_FSIN;
6694     break;
6695   case Intrinsic::experimental_constrained_cos:
6696     Opcode = ISD::STRICT_FCOS;
6697     break;
6698   case Intrinsic::experimental_constrained_exp:
6699     Opcode = ISD::STRICT_FEXP;
6700     break;
6701   case Intrinsic::experimental_constrained_exp2:
6702     Opcode = ISD::STRICT_FEXP2;
6703     break;
6704   case Intrinsic::experimental_constrained_log:
6705     Opcode = ISD::STRICT_FLOG;
6706     break;
6707   case Intrinsic::experimental_constrained_log10:
6708     Opcode = ISD::STRICT_FLOG10;
6709     break;
6710   case Intrinsic::experimental_constrained_log2:
6711     Opcode = ISD::STRICT_FLOG2;
6712     break;
6713   case Intrinsic::experimental_constrained_rint:
6714     Opcode = ISD::STRICT_FRINT;
6715     break;
6716   case Intrinsic::experimental_constrained_nearbyint:
6717     Opcode = ISD::STRICT_FNEARBYINT;
6718     break;
6719   case Intrinsic::experimental_constrained_maxnum:
6720     Opcode = ISD::STRICT_FMAXNUM;
6721     break;
6722   case Intrinsic::experimental_constrained_minnum:
6723     Opcode = ISD::STRICT_FMINNUM;
6724     break;
6725   case Intrinsic::experimental_constrained_ceil:
6726     Opcode = ISD::STRICT_FCEIL;
6727     break;
6728   case Intrinsic::experimental_constrained_floor:
6729     Opcode = ISD::STRICT_FFLOOR;
6730     break;
6731   case Intrinsic::experimental_constrained_round:
6732     Opcode = ISD::STRICT_FROUND;
6733     break;
6734   case Intrinsic::experimental_constrained_trunc:
6735     Opcode = ISD::STRICT_FTRUNC;
6736     break;
6737   }
6738   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6739   SDValue Chain = getRoot();
6740   SmallVector<EVT, 4> ValueVTs;
6741   ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs);
6742   ValueVTs.push_back(MVT::Other); // Out chain
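       // e.g. a constrained fadd on double gives a node with VTs {f64, Other};
       // threading the chain in and out keeps the operation ordered with respect
       // to other code that reads or writes the FP environment.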
6743 
6744   SDVTList VTs = DAG.getVTList(ValueVTs);
6745   SDValue Result;
6746   if (FPI.isUnaryOp())
6747     Result = DAG.getNode(Opcode, sdl, VTs,
6748                          { Chain, getValue(FPI.getArgOperand(0)) });
6749   else if (FPI.isTernaryOp())
6750     Result = DAG.getNode(Opcode, sdl, VTs,
6751                          { Chain, getValue(FPI.getArgOperand(0)),
6752                                   getValue(FPI.getArgOperand(1)),
6753                                   getValue(FPI.getArgOperand(2)) });
6754   else
6755     Result = DAG.getNode(Opcode, sdl, VTs,
6756                          { Chain, getValue(FPI.getArgOperand(0)),
6757                            getValue(FPI.getArgOperand(1))  });
6758 
6759   assert(Result.getNode()->getNumValues() == 2);
6760   SDValue OutChain = Result.getValue(1);
6761   DAG.setRoot(OutChain);
6762   SDValue FPResult = Result.getValue(0);
6763   setValue(&FPI, FPResult);
6764 }
6765 
6766 std::pair<SDValue, SDValue>
6767 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
6768                                     const BasicBlock *EHPadBB) {
6769   MachineFunction &MF = DAG.getMachineFunction();
6770   MachineModuleInfo &MMI = MF.getMMI();
6771   MCSymbol *BeginLabel = nullptr;
6772 
6773   if (EHPadBB) {
6774     // Insert a label before the invoke call to mark the try range.  This can be
6775     // used to detect deletion of the invoke via the MachineModuleInfo.
6776     BeginLabel = MMI.getContext().createTempSymbol();
6777 
6778     // For SjLj, keep track of which landing pads go with which invokes
6779     // so as to maintain the ordering of pads in the LSDA.
6780     unsigned CallSiteIndex = MMI.getCurrentCallSite();
6781     if (CallSiteIndex) {
6782       MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
6783       LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
6784 
6785       // Now that the call site is handled, stop tracking it.
6786       MMI.setCurrentCallSite(0);
6787     }
6788 
6789     // Both PendingLoads and PendingExports must be flushed here;
6790     // this call might not return.
6791     (void)getRoot();
6792     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
6793 
6794     CLI.setChain(getRoot());
6795   }
6796   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6797   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
6798 
6799   assert((CLI.IsTailCall || Result.second.getNode()) &&
6800          "Non-null chain expected with non-tail call!");
6801   assert((Result.second.getNode() || !Result.first.getNode()) &&
6802          "Null value expected with tail call!");
6803 
6804   if (!Result.second.getNode()) {
6805     // As a special case, a null chain means that a tail call has been emitted
6806     // and the DAG root is already updated.
6807     HasTailCall = true;
6808 
6809     // Since there's no actual continuation from this block, nothing can be
6810     // relying on us setting vregs for them.
6811     PendingExports.clear();
6812   } else {
6813     DAG.setRoot(Result.second);
6814   }
6815 
6816   if (EHPadBB) {
6817     // Insert a label at the end of the invoke call to mark the try range.  This
6818     // can be used to detect deletion of the invoke via the MachineModuleInfo.
6819     MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
6820     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
6821 
6822     // Inform MachineModuleInfo of range.
6823     auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
6824     // Some platforms (e.g. wasm) use funclet-style IR but do not actually
6825     // use outlined funclets or their LSDA info style.
6826     if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
6827       assert(CLI.CS);
6828       WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
6829       EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS.getInstruction()),
6830                                 BeginLabel, EndLabel);
6831     } else if (!isScopedEHPersonality(Pers)) {
6832       MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
6833     }
6834   }
6835 
6836   return Result;
6837 }
6838 
6839 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
6840                                       bool isTailCall,
6841                                       const BasicBlock *EHPadBB) {
6842   auto &DL = DAG.getDataLayout();
6843   FunctionType *FTy = CS.getFunctionType();
6844   Type *RetTy = CS.getType();
6845 
6846   TargetLowering::ArgListTy Args;
6847   Args.reserve(CS.arg_size());
6848 
6849   const Value *SwiftErrorVal = nullptr;
6850   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6851 
6852   // We can't tail call inside a function with a swifterror argument. Lowering
6853   // does not support this yet; the value would have to be moved into the
6854   // swifterror register before the call.
6855   auto *Caller = CS.getInstruction()->getParent()->getParent();
6856   if (TLI.supportSwiftError() &&
6857       Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
6858     isTailCall = false;
6859 
6860   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
6861        i != e; ++i) {
6862     TargetLowering::ArgListEntry Entry;
6863     const Value *V = *i;
6864 
6865     // Skip empty types
6866     if (V->getType()->isEmptyTy())
6867       continue;
6868 
6869     SDValue ArgNode = getValue(V);
6870     Entry.Node = ArgNode; Entry.Ty = V->getType();
6871 
6872     Entry.setAttributes(&CS, i - CS.arg_begin());
6873 
6874     // Use swifterror virtual register as input to the call.
6875     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
6876       SwiftErrorVal = V;
6877       // Find the virtual register for the actual swifterror argument and use
6878       // it in place of the Value.
6879       Entry.Node = DAG.getRegister(FuncInfo
6880                                        .getOrCreateSwiftErrorVRegUseAt(
6881                                            CS.getInstruction(), FuncInfo.MBB, V)
6882                                        .first,
6883                                    EVT(TLI.getPointerTy(DL)));
6884     }
6885 
6886     Args.push_back(Entry);
6887 
6888     // If we have an explicit sret argument that is an Instruction (i.e., it
6889     // might point to function-local memory), we can't meaningfully tail-call.
6890     if (Entry.IsSRet && isa<Instruction>(V))
6891       isTailCall = false;
6892   }
6893 
6894   // Check if target-independent constraints permit a tail call here.
6895   // Target-dependent constraints are checked within TLI->LowerCallTo.
6896   if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
6897     isTailCall = false;
6898 
6899   // Disable tail calls if there is a swifterror argument. Targets have not
6900   // been updated to support tail calls.
6901   if (TLI.supportSwiftError() && SwiftErrorVal)
6902     isTailCall = false;
6903 
6904   TargetLowering::CallLoweringInfo CLI(DAG);
6905   CLI.setDebugLoc(getCurSDLoc())
6906       .setChain(getRoot())
6907       .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
6908       .setTailCall(isTailCall)
6909       .setConvergent(CS.isConvergent());
6910   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
6911 
6912   if (Result.first.getNode()) {
6913     const Instruction *Inst = CS.getInstruction();
6914     Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
6915     setValue(Inst, Result.first);
6916   }
6917 
6918   // The last element of CLI.InVals has the SDValue for swifterror return.
6919   // Here we copy it to a virtual register and update SwiftErrorMap for
6920   // book-keeping.
6921   if (SwiftErrorVal && TLI.supportSwiftError()) {
6922     // Get the last element of InVals.
6923     SDValue Src = CLI.InVals.back();
6924     unsigned VReg; bool CreatedVReg;
6925     std::tie(VReg, CreatedVReg) =
6926         FuncInfo.getOrCreateSwiftErrorVRegDefAt(CS.getInstruction());
6927     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
6928     // We update the virtual register for the actual swifterror argument.
6929     if (CreatedVReg)
6930       FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, SwiftErrorVal, VReg);
6931     DAG.setRoot(CopyNode);
6932   }
6933 }
6934 
6935 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
6936                              SelectionDAGBuilder &Builder) {
6937   // Check to see if this load can be trivially constant folded, e.g. if the
6938   // input is from a string literal.
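       // e.g. for memcmp(p, "ab", 2), the "ab" side folds to the i16 constant
       // 0x6261 (on a little-endian target) with no runtime load.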
6939   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
6940     // Cast pointer to the type we really want to load.
6941     Type *LoadTy =
6942         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
6943     if (LoadVT.isVector())
6944       LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements());
6945 
6946     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
6947                                          PointerType::getUnqual(LoadTy));
6948 
6949     if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
6950             const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
6951       return Builder.getValue(LoadCst);
6952   }
6953 
6954   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
6955   // still constant memory, the input chain can be the entry node.
6956   SDValue Root;
6957   bool ConstantMemory = false;
6958 
6959   // Do not serialize (non-volatile) loads of constant memory with anything.
6960   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
6961     Root = Builder.DAG.getEntryNode();
6962     ConstantMemory = true;
6963   } else {
6964     // Do not serialize non-volatile loads against each other.
6965     Root = Builder.DAG.getRoot();
6966   }
6967 
6968   SDValue Ptr = Builder.getValue(PtrVal);
6969   SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
6970                                         Ptr, MachinePointerInfo(PtrVal),
6971                                         /* Alignment = */ 1);
6972 
6973   if (!ConstantMemory)
6974     Builder.PendingLoads.push_back(LoadVal.getValue(1));
6975   return LoadVal;
6976 }
6977 
6978 /// Record the value for an instruction that produces an integer result,
6979 /// converting the type where necessary.
6980 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
6981                                                   SDValue Value,
6982                                                   bool IsSigned) {
6983   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6984                                                     I.getType(), true);
6985   if (IsSigned)
6986     Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
6987   else
6988     Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
6989   setValue(&I, Value);
6990 }
6991 
6992 /// See if we can lower a memcmp call into an optimized form. If so, return
6993 /// true and lower it. Otherwise return false, and it will be lowered like a
6994 /// normal call.
6995 /// The caller already checked that \p I calls the appropriate LibFunc with a
6996 /// correct prototype.
6997 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
6998   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
6999   const Value *Size = I.getArgOperand(2);
7000   const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
7001   if (CSize && CSize->getZExtValue() == 0) {
7002     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7003                                                           I.getType(), true);
7004     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
7005     return true;
7006   }
7007 
7008   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7009   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
7010       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
7011       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
7012   if (Res.first.getNode()) {
7013     processIntegerCallValue(I, Res.first, true);
7014     PendingLoads.push_back(Res.second);
7015     return true;
7016   }
7017 
7018   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
7019   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
7020   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
7021     return false;
7022 
7023   // If the target has a fast compare for the given size, it will return a
7024   // preferred load type for that size. Require that the load VT is legal and
7025   // that the target supports unaligned loads of that type. Otherwise, return
7026   // INVALID.
7027   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
7028     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7029     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
7030     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
7031       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
7032       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
7033       // TODO: Check alignment of src and dest ptrs.
7034       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
7035       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
7036       if (!TLI.isTypeLegal(LVT) ||
7037           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
7038           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
7039         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
7040     }
7041 
7042     return LVT;
7043   };
7044 
7045   // This turns into unaligned loads. We only do this if the target natively
7046   // supports the MVT we'll be loading or if it is small enough (<= 4) that
7047   // we'll only produce a small number of byte loads.
7048   MVT LoadVT;
7049   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
7050   switch (NumBitsToCompare) {
7051   default:
7052     return false;
7053   case 16:
7054     LoadVT = MVT::i16;
7055     break;
7056   case 32:
7057     LoadVT = MVT::i32;
7058     break;
7059   case 64:
7060   case 128:
7061   case 256:
7062     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
7063     break;
7064   }
7065 
7066   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
7067     return false;
7068 
7069   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
7070   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
7071 
7072   // Bitcast to a wide integer type if the loads are vectors.
7073   if (LoadVT.isVector()) {
7074     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
7075     LoadL = DAG.getBitcast(CmpVT, LoadL);
7076     LoadR = DAG.getBitcast(CmpVT, LoadR);
7077   }
7078 
7079   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
7080   processIntegerCallValue(I, Cmp, false);
7081   return true;
7082 }
7083 
7084 /// See if we can lower a memchr call into an optimized form. If so, return
7085 /// true and lower it. Otherwise return false, and it will be lowered like a
7086 /// normal call.
7087 /// The caller already checked that \p I calls the appropriate LibFunc with a
7088 /// correct prototype.
7089 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
7090   const Value *Src = I.getArgOperand(0);
7091   const Value *Char = I.getArgOperand(1);
7092   const Value *Length = I.getArgOperand(2);
7093 
7094   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7095   std::pair<SDValue, SDValue> Res =
7096     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
7097                                 getValue(Src), getValue(Char), getValue(Length),
7098                                 MachinePointerInfo(Src));
7099   if (Res.first.getNode()) {
7100     setValue(&I, Res.first);
7101     PendingLoads.push_back(Res.second);
7102     return true;
7103   }
7104 
7105   return false;
7106 }
7107 
7108 /// See if we can lower a mempcpy call into an optimized form. If so, return
7109 /// true and lower it. Otherwise return false, and it will be lowered like a
7110 /// normal call.
7111 /// The caller already checked that \p I calls the appropriate LibFunc with a
7112 /// correct prototype.
7113 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
7114   SDValue Dst = getValue(I.getArgOperand(0));
7115   SDValue Src = getValue(I.getArgOperand(1));
7116   SDValue Size = getValue(I.getArgOperand(2));
7117 
7118   unsigned DstAlign = DAG.InferPtrAlignment(Dst);
7119   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
7120   unsigned Align = std::min(DstAlign, SrcAlign);
7121   if (Align == 0) // Alignment of one or both could not be inferred.
7122     Align = 1; // 0 and 1 both specify no alignment, but 0 is reserved.
7123 
7124   bool isVol = false;
7125   SDLoc sdl = getCurSDLoc();
7126 
7127   // In the mempcpy context we need to pass in a false value for isTailCall
7128   // because the return pointer needs to be adjusted by the size of
7129   // the copied memory.
7130   SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Align, isVol,
7131                              false, /*isTailCall=*/false,
7132                              MachinePointerInfo(I.getArgOperand(0)),
7133                              MachinePointerInfo(I.getArgOperand(1)));
7134   assert(MC.getNode() != nullptr &&
7135          "** memcpy should not be lowered as TailCall in mempcpy context **");
7136   DAG.setRoot(MC);
7137 
7138   // Check if Size needs to be truncated or extended.
7139   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
7140 
7141   // Adjust return pointer to point just past the last dst byte.
7142   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
7143                                     Dst, Size);
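       // i.e. mempcpy(dst, src, n) copies like memcpy but returns dst + n
       // rather than dst.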
7144   setValue(&I, DstPlusSize);
7145   return true;
7146 }
7147 
7148 /// See if we can lower a strcpy call into an optimized form.  If so, return
7149 /// true and lower it, otherwise return false and it will be lowered like a
7150 /// normal call.
7151 /// The caller already checked that \p I calls the appropriate LibFunc with a
7152 /// correct prototype.
7153 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
7154   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7155 
7156   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7157   std::pair<SDValue, SDValue> Res =
7158     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
7159                                 getValue(Arg0), getValue(Arg1),
7160                                 MachinePointerInfo(Arg0),
7161                                 MachinePointerInfo(Arg1), isStpcpy);
7162   if (Res.first.getNode()) {
7163     setValue(&I, Res.first);
7164     DAG.setRoot(Res.second);
7165     return true;
7166   }
7167 
7168   return false;
7169 }
7170 
7171 /// See if we can lower a strcmp call into an optimized form.  If so, return
7172 /// true and lower it, otherwise return false and it will be lowered like a
7173 /// normal call.
7174 /// The caller already checked that \p I calls the appropriate LibFunc with a
7175 /// correct prototype.
7176 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
7177   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7178 
7179   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7180   std::pair<SDValue, SDValue> Res =
7181     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
7182                                 getValue(Arg0), getValue(Arg1),
7183                                 MachinePointerInfo(Arg0),
7184                                 MachinePointerInfo(Arg1));
7185   if (Res.first.getNode()) {
7186     processIntegerCallValue(I, Res.first, true);
7187     PendingLoads.push_back(Res.second);
7188     return true;
7189   }
7190 
7191   return false;
7192 }
7193 
7194 /// See if we can lower a strlen call into an optimized form.  If so, return
7195 /// true and lower it, otherwise return false and it will be lowered like a
7196 /// normal call.
7197 /// The caller already checked that \p I calls the appropriate LibFunc with a
7198 /// correct prototype.
7199 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
7200   const Value *Arg0 = I.getArgOperand(0);
7201 
7202   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7203   std::pair<SDValue, SDValue> Res =
7204     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
7205                                 getValue(Arg0), MachinePointerInfo(Arg0));
7206   if (Res.first.getNode()) {
7207     processIntegerCallValue(I, Res.first, false);
7208     PendingLoads.push_back(Res.second);
7209     return true;
7210   }
7211 
7212   return false;
7213 }
7214 
7215 /// See if we can lower a strnlen call into an optimized form.  If so, return
7216 /// true and lower it, otherwise return false and it will be lowered like a
7217 /// normal call.
7218 /// The caller already checked that \p I calls the appropriate LibFunc with a
7219 /// correct prototype.
7220 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
7221   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7222 
7223   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7224   std::pair<SDValue, SDValue> Res =
7225     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
7226                                  getValue(Arg0), getValue(Arg1),
7227                                  MachinePointerInfo(Arg0));
7228   if (Res.first.getNode()) {
7229     processIntegerCallValue(I, Res.first, false);
7230     PendingLoads.push_back(Res.second);
7231     return true;
7232   }
7233 
7234   return false;
7235 }
7236 
7237 /// See if we can lower a unary floating-point operation into an SDNode with
7238 /// the specified Opcode.  If so, return true and lower it, otherwise return
7239 /// false and it will be lowered like a normal call.
7240 /// The caller already checked that \p I calls the appropriate LibFunc with a
7241 /// correct prototype.
7242 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
7243                                               unsigned Opcode) {
7244   // We already checked this call's prototype; verify it doesn't modify errno.
7245   if (!I.onlyReadsMemory())
7246     return false;
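       // (A readonly/readnone call cannot write errno, so it is safe to lower it
       // to a plain FP node with no side effects.)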
7247 
7248   SDValue Tmp = getValue(I.getArgOperand(0));
7249   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
7250   return true;
7251 }
7252 
7253 /// See if we can lower a binary floating-point operation into an SDNode with
7254 /// the specified Opcode. If so, return true and lower it. Otherwise return
7255 /// false, and it will be lowered like a normal call.
7256 /// The caller already checked that \p I calls the appropriate LibFunc with a
7257 /// correct prototype.
7258 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
7259                                                unsigned Opcode) {
7260   // We already checked this call's prototype; verify it doesn't modify errno.
7261   if (!I.onlyReadsMemory())
7262     return false;
7263 
7264   SDValue Tmp0 = getValue(I.getArgOperand(0));
7265   SDValue Tmp1 = getValue(I.getArgOperand(1));
7266   EVT VT = Tmp0.getValueType();
7267   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
7268   return true;
7269 }
7270 
7271 void SelectionDAGBuilder::visitCall(const CallInst &I) {
7272   // Handle inline assembly differently.
7273   if (isa<InlineAsm>(I.getCalledValue())) {
7274     visitInlineAsm(&I);
7275     return;
7276   }
7277 
7278   const char *RenameFn = nullptr;
7279   if (Function *F = I.getCalledFunction()) {
7280     if (F->isDeclaration()) {
7281       // Is this an LLVM intrinsic or a target-specific intrinsic?
7282       unsigned IID = F->getIntrinsicID();
7283       if (!IID)
7284         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
7285           IID = II->getIntrinsicID(F);
7286 
7287       if (IID) {
7288         RenameFn = visitIntrinsicCall(I, IID);
7289         if (!RenameFn)
7290           return;
7291       }
7292     }
7293 
7294     // Check for well-known libc/libm calls.  If the function is internal, it
7295     // can't be a library call.  Don't do the check if marked as nobuiltin for
7296     // some reason or the call site requires strict floating point semantics.
7297     LibFunc Func;
7298     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
7299         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
7300         LibInfo->hasOptimizedCodeGen(Func)) {
7301       switch (Func) {
7302       default: break;
7303       case LibFunc_copysign:
7304       case LibFunc_copysignf:
7305       case LibFunc_copysignl:
7306         // We already checked this call's prototype; verify it doesn't modify
7307         // errno.
7308         if (I.onlyReadsMemory()) {
7309           SDValue LHS = getValue(I.getArgOperand(0));
7310           SDValue RHS = getValue(I.getArgOperand(1));
7311           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
7312                                    LHS.getValueType(), LHS, RHS));
7313           return;
7314         }
7315         break;
7316       case LibFunc_fabs:
7317       case LibFunc_fabsf:
7318       case LibFunc_fabsl:
7319         if (visitUnaryFloatCall(I, ISD::FABS))
7320           return;
7321         break;
7322       case LibFunc_fmin:
7323       case LibFunc_fminf:
7324       case LibFunc_fminl:
7325         if (visitBinaryFloatCall(I, ISD::FMINNUM))
7326           return;
7327         break;
7328       case LibFunc_fmax:
7329       case LibFunc_fmaxf:
7330       case LibFunc_fmaxl:
7331         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
7332           return;
7333         break;
7334       case LibFunc_sin:
7335       case LibFunc_sinf:
7336       case LibFunc_sinl:
7337         if (visitUnaryFloatCall(I, ISD::FSIN))
7338           return;
7339         break;
7340       case LibFunc_cos:
7341       case LibFunc_cosf:
7342       case LibFunc_cosl:
7343         if (visitUnaryFloatCall(I, ISD::FCOS))
7344           return;
7345         break;
7346       case LibFunc_sqrt:
7347       case LibFunc_sqrtf:
7348       case LibFunc_sqrtl:
7349       case LibFunc_sqrt_finite:
7350       case LibFunc_sqrtf_finite:
7351       case LibFunc_sqrtl_finite:
7352         if (visitUnaryFloatCall(I, ISD::FSQRT))
7353           return;
7354         break;
7355       case LibFunc_floor:
7356       case LibFunc_floorf:
7357       case LibFunc_floorl:
7358         if (visitUnaryFloatCall(I, ISD::FFLOOR))
7359           return;
7360         break;
7361       case LibFunc_nearbyint:
7362       case LibFunc_nearbyintf:
7363       case LibFunc_nearbyintl:
7364         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
7365           return;
7366         break;
7367       case LibFunc_ceil:
7368       case LibFunc_ceilf:
7369       case LibFunc_ceill:
7370         if (visitUnaryFloatCall(I, ISD::FCEIL))
7371           return;
7372         break;
7373       case LibFunc_rint:
7374       case LibFunc_rintf:
7375       case LibFunc_rintl:
7376         if (visitUnaryFloatCall(I, ISD::FRINT))
7377           return;
7378         break;
7379       case LibFunc_round:
7380       case LibFunc_roundf:
7381       case LibFunc_roundl:
7382         if (visitUnaryFloatCall(I, ISD::FROUND))
7383           return;
7384         break;
7385       case LibFunc_trunc:
7386       case LibFunc_truncf:
7387       case LibFunc_truncl:
7388         if (visitUnaryFloatCall(I, ISD::FTRUNC))
7389           return;
7390         break;
7391       case LibFunc_log2:
7392       case LibFunc_log2f:
7393       case LibFunc_log2l:
7394         if (visitUnaryFloatCall(I, ISD::FLOG2))
7395           return;
7396         break;
7397       case LibFunc_exp2:
7398       case LibFunc_exp2f:
7399       case LibFunc_exp2l:
7400         if (visitUnaryFloatCall(I, ISD::FEXP2))
7401           return;
7402         break;
7403       case LibFunc_memcmp:
7404         if (visitMemCmpCall(I))
7405           return;
7406         break;
7407       case LibFunc_mempcpy:
7408         if (visitMemPCpyCall(I))
7409           return;
7410         break;
7411       case LibFunc_memchr:
7412         if (visitMemChrCall(I))
7413           return;
7414         break;
7415       case LibFunc_strcpy:
7416         if (visitStrCpyCall(I, false))
7417           return;
7418         break;
7419       case LibFunc_stpcpy:
7420         if (visitStrCpyCall(I, true))
7421           return;
7422         break;
7423       case LibFunc_strcmp:
7424         if (visitStrCmpCall(I))
7425           return;
7426         break;
7427       case LibFunc_strlen:
7428         if (visitStrLenCall(I))
7429           return;
7430         break;
7431       case LibFunc_strnlen:
7432         if (visitStrNLenCall(I))
7433           return;
7434         break;
7435       }
7436     }
7437   }
7438 
7439   SDValue Callee;
7440   if (!RenameFn)
7441     Callee = getValue(I.getCalledValue());
7442   else
7443     Callee = DAG.getExternalSymbol(
7444         RenameFn,
7445         DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
7446 
7447   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
7448   // have to do anything here to lower funclet bundles.
7449   assert(!I.hasOperandBundlesOtherThan(
7450              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
7451          "Cannot lower calls with arbitrary operand bundles!");
7452 
7453   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
7454     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
7455   else
7456     // Check if we can potentially perform a tail call. More detailed checking
7457     // is done within LowerCallTo, after more information about the call is
7458     // known.
7459     LowerCallTo(&I, Callee, I.isTailCall());
7460 }
7461 
7462 namespace {
7463 
7464 /// AsmOperandInfo - This contains information for each constraint that we are
7465 /// lowering.
7466 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
7467 public:
7468   /// CallOperand - If this is the result output operand or a clobber,
7469   /// this is null, otherwise it is the incoming operand to the CallInst.
7470   /// This gets modified as the asm is processed.
7471   SDValue CallOperand;
7472 
7473   /// AssignedRegs - If this is a register or register class operand, this
7474   /// contains the set of registers corresponding to the operand.
7475   RegsForValue AssignedRegs;
7476 
7477   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
7478     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
7479   }
7480 
7481   /// Whether or not this operand accesses memory
7482   bool hasMemory(const TargetLowering &TLI) const {
7483     // Indirect operand accesses access memory.
7484     if (isIndirect)
7485       return true;
7486 
7487     for (const auto &Code : Codes)
7488       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
7489         return true;
7490 
7491     return false;
7492   }
7493 
7494   /// getCallOperandValEVT - Return the EVT of the Value* that this operand
7495   /// corresponds to.  If there is no Value* for this operand, it returns
7496   /// MVT::Other.
7497   EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
7498                            const DataLayout &DL) const {
7499     if (!CallOperandVal) return MVT::Other;
7500 
7501     if (isa<BasicBlock>(CallOperandVal))
7502       return TLI.getPointerTy(DL);
7503 
7504     llvm::Type *OpTy = CallOperandVal->getType();
7505 
7506     // FIXME: code duplicated from TargetLowering::ParseConstraints().
7507     // If this is an indirect operand, the operand is a pointer to the
7508     // accessed type.
7509     if (isIndirect) {
7510       PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
7511       if (!PtrTy)
7512         report_fatal_error("Indirect operand for inline asm not a pointer!");
7513       OpTy = PtrTy->getElementType();
7514     }
7515 
7516     // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
7517     if (StructType *STy = dyn_cast<StructType>(OpTy))
7518       if (STy->getNumElements() == 1)
7519         OpTy = STy->getElementType(0);
7520 
7521     // If OpTy is not a single value, it may be a struct/union that we
7522     // can tile with integers.
7523     if (!OpTy->isSingleValueType() && OpTy->isSized()) {
7524       unsigned BitSize = DL.getTypeSizeInBits(OpTy);
7525       switch (BitSize) {
7526       default: break;
7527       case 1:
7528       case 8:
7529       case 16:
7530       case 32:
7531       case 64:
7532       case 128:
7533         OpTy = IntegerType::get(Context, BitSize);
7534         break;
7535       }
7536     }
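         // e.g. an 8-byte aggregate used with an "r" constraint is retyped as
         // i64; other sizes fall through and keep their original type.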
7537 
7538     return TLI.getValueType(DL, OpTy, true);
7539   }
7540 };
7541 
7542 using SDISelAsmOperandInfoVector = SmallVector<SDISelAsmOperandInfo, 16>;
7543 
7544 } // end anonymous namespace
7545 
7546 /// Make sure that the output operand \p OpInfo and its corresponding input
7547 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
7548 /// out).
7549 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
7550                                SDISelAsmOperandInfo &MatchingOpInfo,
7551                                SelectionDAG &DAG) {
7552   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
7553     return;
7554 
7555   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
7556   const auto &TLI = DAG.getTargetLoweringInfo();
7557 
7558   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
7559       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
7560                                        OpInfo.ConstraintVT);
7561   std::pair<unsigned, const TargetRegisterClass *> InputRC =
7562       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
7563                                        MatchingOpInfo.ConstraintVT);
7564   if ((OpInfo.ConstraintVT.isInteger() !=
7565        MatchingOpInfo.ConstraintVT.isInteger()) ||
7566       (MatchRC.second != InputRC.second)) {
7567     // FIXME: error out in a more elegant fashion
7568     report_fatal_error("Unsupported asm: input constraint"
7569                        " with a matching output constraint of"
7570                        " incompatible type!");
7571   }
7572   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
7573 }
7574 
7575 /// Get a direct memory input to behave well as an indirect operand.
7576 /// This may introduce stores, hence the need for a \p Chain.
7577 /// \return The (possibly updated) chain.
7578 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
7579                                         SDISelAsmOperandInfo &OpInfo,
7580                                         SelectionDAG &DAG) {
7581   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7582 
7583   // If we don't have an indirect input, put it in the constpool if we can,
7584   // otherwise spill it to a stack slot.
7585   // TODO: This isn't quite right. We need to handle these according to
7586   // the addressing mode that the constraint wants. Also, this may take
7587   // an additional register for the computation and we don't want that
7588   // either.
7589 
7590   // If the operand is a float, integer, or vector constant, spill to a
7591   // constant pool entry to get its address.
7592   const Value *OpVal = OpInfo.CallOperandVal;
7593   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
7594       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
7595     OpInfo.CallOperand = DAG.getConstantPool(
7596         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
7597     return Chain;
7598   }
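       // e.g. an "m"-constrained input fed the constant 42 is rewritten to the
       // address of a constant-pool slot holding 42.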
7599 
7600   // Otherwise, create a stack slot and emit a store to it before the asm.
7601   Type *Ty = OpVal->getType();
7602   auto &DL = DAG.getDataLayout();
7603   uint64_t TySize = DL.getTypeAllocSize(Ty);
7604   unsigned Align = DL.getPrefTypeAlignment(Ty);
7605   MachineFunction &MF = DAG.getMachineFunction();
7606   int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
7607   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
7608   Chain = DAG.getStore(Chain, Location, OpInfo.CallOperand, StackSlot,
7609                        MachinePointerInfo::getFixedStack(MF, SSFI));
7610   OpInfo.CallOperand = StackSlot;
7611 
7612   return Chain;
7613 }
7614 
7615 /// GetRegistersForValue - Assign registers (virtual or physical) for the
7616 /// specified operand.  We prefer to assign virtual registers, to allow the
7617 /// register allocator to handle the assignment process.  However, if the asm
7618 /// uses features that we can't model on machineinstrs, we have SDISel do the
7619 /// allocation.  This produces generally horrible, but correct, code.
7620 ///
7621 ///   OpInfo describes the operand
7622 ///   RefOpInfo describes the matching operand if any, the operand otherwise
7623 static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
7624                                  SDISelAsmOperandInfo &OpInfo,
7625                                  SDISelAsmOperandInfo &RefOpInfo) {
7626   LLVMContext &Context = *DAG.getContext();
7627   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7628 
7629   MachineFunction &MF = DAG.getMachineFunction();
7630   SmallVector<unsigned, 4> Regs;
7631   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7632 
7633   // No work to do for memory operations.
7634   if (OpInfo.ConstraintType == TargetLowering::C_Memory)
7635     return;
7636 
7637   // If this is a constraint for a single physreg, or a constraint for a
7638   // register class, find it.
7639   unsigned AssignedReg;
7640   const TargetRegisterClass *RC;
7641   std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
7642       &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
7643   // RC is unset only on failure. Return immediately.
7644   if (!RC)
7645     return;
7646 
7647   // Get the actual register value type.  This is important, because the user
7648   // may have asked for (e.g.) the AX register in i32 type.  We need to
7649   // remember that AX is actually i16 to get the right extension.
7650   const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
7651 
7652   if (OpInfo.ConstraintVT != MVT::Other) {
7653     // If this is an FP operand in an integer register (or vice versa), or more
7654     // generally if the operand value disagrees with the register class we plan
7655     // to stick it in, fix the operand type.
7656     //
7657     // If this is an input value, the bitcast to the new type is done now.
7658     // Bitcast for output value is done at the end of visitInlineAsm().
7659     if ((OpInfo.Type == InlineAsm::isOutput ||
7660          OpInfo.Type == InlineAsm::isInput) &&
7661         !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
7662       // Try to convert to the first EVT that the reg class contains.  If the
7663       // types are identical size, use a bitcast to convert (e.g. two differing
7664       // vector types).  Note: output bitcast is done at the end of
7665       // visitInlineAsm().
7666       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
7667         // Exclude indirect inputs while they are unsupported because the code
7668         // to perform the load is missing and thus OpInfo.CallOperand still
7669         // refers to the input address rather than the pointed-to value.
7670         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
7671           OpInfo.CallOperand =
7672               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
7673         OpInfo.ConstraintVT = RegVT;
7674         // If the operand is an FP value and we want it in integer registers,
7675         // use the corresponding integer type. This turns an f64 value into
7676         // i64, which can be passed with two i32 values on a 32-bit machine.
7677       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
7678         MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
7679         if (OpInfo.Type == InlineAsm::isInput)
7680           OpInfo.CallOperand =
7681               DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
7682         OpInfo.ConstraintVT = VT;
7683       }
7684     }
7685   }
7686 
7687   // No need to allocate registers for a matching input constraint since the
7688   // constraint it matches has already had its registers allocated.
7689   if (OpInfo.isMatchingInputConstraint())
7690     return;
7691 
7692   EVT ValueVT = OpInfo.ConstraintVT;
7693   if (OpInfo.ConstraintVT == MVT::Other)
7694     ValueVT = RegVT;
7695 
7696   // Initialize NumRegs.
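  // E.g., an i64 value on a 32-bit target typically needs two i32 registers.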
7697   unsigned NumRegs = 1;
7698   if (OpInfo.ConstraintVT != MVT::Other)
7699     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
7700 
7701   // If this is a constraint for a specific physical register, like {r17},
7702   // assign it now.
7703 
7704   // If this is associated with a specific register, initialize the iterator
7705   // to the correct place.  If virtual, make sure we have enough registers.
7706 
7707   // Initialize the iterator if necessary.
7708   TargetRegisterClass::iterator I = RC->begin();
7709   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7710 
7711   // If a specific physical register was requested, advance the iterator to it.
7712   if (AssignedReg) {
7713     for (; *I != AssignedReg; ++I)
7714       assert(I != RC->end() && "AssignedReg should be member of RC");
7715   }
7716 
7717   for (; NumRegs; --NumRegs, ++I) {
7718     assert(I != RC->end() && "Ran out of registers to allocate!");
7719     auto R = (AssignedReg) ? *I : RegInfo.createVirtualRegister(RC);
7720     Regs.push_back(R);
7721   }
7722 
7723   OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
7724 }
7725 
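/// In the flat INLINEASM operand list, each operand group consists of a flag
/// word (encoding the operand kind and its operand count) followed by that
/// many SDValue operands, so a whole group is skipped by advancing
/// getNumOperandRegisters(OpFlag) + 1 positions.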
7726 static unsigned
7727 findMatchingInlineAsmOperand(unsigned OperandNo,
7728                              const std::vector<SDValue> &AsmNodeOperands) {
7729   // Scan until we find the definition of this operand that we already emitted.
7730   unsigned CurOp = InlineAsm::Op_FirstOperand;
7731   for (; OperandNo; --OperandNo) {
7732     // Advance to the next operand.
7733     unsigned OpFlag =
7734         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7735     assert((InlineAsm::isRegDefKind(OpFlag) ||
7736             InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
7737             InlineAsm::isMemKind(OpFlag)) &&
7738            "Skipped past definitions?");
7739     CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
7740   }
7741   return CurOp;
7742 }
7743 
7744 namespace {
7745 
7746 class ExtraFlags {
7747   unsigned Flags = 0;
7748 
7749 public:
7750   explicit ExtraFlags(ImmutableCallSite CS) {
7751     const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
7752     if (IA->hasSideEffects())
7753       Flags |= InlineAsm::Extra_HasSideEffects;
7754     if (IA->isAlignStack())
7755       Flags |= InlineAsm::Extra_IsAlignStack;
7756     if (CS.isConvergent())
7757       Flags |= InlineAsm::Extra_IsConvergent;
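    // getDialect() is 0 for AT&T and 1 for Intel, so this multiplication sets
    // the Extra_AsmDialect bit only for Intel-dialect asm.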
7758     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
7759   }
7760 
7761   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
7762     // Ideally, we would only check against memory constraints.  However, the
7763     // meaning of an Other constraint can be target-specific and we can't easily
7764     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
7765     // for Other constraints as well.
7766     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
7767         OpInfo.ConstraintType == TargetLowering::C_Other) {
7768       if (OpInfo.Type == InlineAsm::isInput)
7769         Flags |= InlineAsm::Extra_MayLoad;
7770       else if (OpInfo.Type == InlineAsm::isOutput)
7771         Flags |= InlineAsm::Extra_MayStore;
7772       else if (OpInfo.Type == InlineAsm::isClobber)
7773         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
7774     }
7775   }
7776 
7777   unsigned get() const { return Flags; }
7778 };
7779 
7780 } // end anonymous namespace
7781 
7782 /// visitInlineAsm - Handle a call to an InlineAsm object.
7783 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
7784   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
7785 
7786   // ConstraintOperands - Information about all of the constraints.
7787   SDISelAsmOperandInfoVector ConstraintOperands;
7788 
7789   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7790   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
7791       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);
7792 
7793   // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
7794   // AsmDialect, MayLoad, MayStore).
7795   bool HasSideEffect = IA->hasSideEffects();
7796   ExtraFlags ExtraInfo(CS);
7797 
7798   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
7799   unsigned ResNo = 0;   // ResNo - The result number of the next output.
7800   for (auto &T : TargetConstraints) {
7801     ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
7802     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
7803 
7804     // Compute the value type for each operand.
7805     if (OpInfo.Type == InlineAsm::isInput ||
7806         (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
7807       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
7808 
7809       // Process the call argument.  BasicBlocks are labels, currently
7810       // appearing only in inline asm.
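      // For a callbr, the trailing call arguments are the blockaddresses of
      // its indirect destinations; those are lowered to target block
      // addresses below.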
7811       const Instruction *I = CS.getInstruction();
7812       if (isa<CallBrInst>(I) &&
7813           (ArgNo - 1) >= (cast<CallBrInst>(I)->getNumArgOperands() -
7814                           cast<CallBrInst>(I)->getNumIndirectDests())) {
7815         const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
7816         EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
7817         OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
7818       } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
7819         OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
7820       } else {
7821         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
7822       }
7823 
7824       OpInfo.ConstraintVT =
7825           OpInfo
7826               .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
7827               .getSimpleVT();
7828     } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
7829       // The return value of the call is this value.  As such, there is no
7830       // corresponding argument.
7831       assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7832       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
7833         OpInfo.ConstraintVT = TLI.getSimpleValueType(
7834             DAG.getDataLayout(), STy->getElementType(ResNo));
7835       } else {
7836         assert(ResNo == 0 && "Asm only has one result!");
7837         OpInfo.ConstraintVT =
7838             TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
7839       }
7840       ++ResNo;
7841     } else {
7842       OpInfo.ConstraintVT = MVT::Other;
7843     }
7844 
7845     if (!HasSideEffect)
7846       HasSideEffect = OpInfo.hasMemory(TLI);
7847 
7848     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
7849     // FIXME: Could we compute this on OpInfo rather than T?
7850 
7851     // Compute the constraint code and ConstraintType to use.
7852     TLI.ComputeConstraintToUse(T, SDValue());
7853 
7854     ExtraInfo.update(T);
7855   }
7856 
7857   // We won't need to flush pending loads if this asm doesn't touch
7858   // memory and is nonvolatile.
7859   SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
7860 
7861   // Second pass over the constraints: compute which constraint option to use.
7862   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
7863     // If this is an output operand with a matching input operand, look up the
7864     // matching input. If their types mismatch, e.g. one is an integer, the
7865     // other is floating point, or their sizes are different, flag it as an
7866     // error.
7867     if (OpInfo.hasMatchingInput()) {
7868       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
7869       patchMatchingInput(OpInfo, Input, DAG);
7870     }
7871 
7872     // Compute the constraint code and ConstraintType to use.
7873     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
7874 
7875     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
7876         OpInfo.Type == InlineAsm::isClobber)
7877       continue;
7878 
7879     // If this is a memory input and the operand is not indirect, do what is
7880     // needed to provide an address for the memory input.
7881     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
7882         !OpInfo.isIndirect) {
7883       assert((OpInfo.isMultipleAlternative ||
7884               (OpInfo.Type == InlineAsm::isInput)) &&
7885              "Can only indirectify direct input operands!");
7886 
7887       // Memory operands really want the address of the value.
7888       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
7889 
7890       // There is no longer a Value* corresponding to this operand.
7891       OpInfo.CallOperandVal = nullptr;
7892 
7893       // It is now an indirect operand.
7894       OpInfo.isIndirect = true;
7895     }
7896 
7897   }
7898 
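  // The fixed operand layout is: Op_InputChain (0), Op_AsmString (1),
  // Op_MDNode (2, the potentially-null !srcloc node), Op_ExtraInfo (3), and
  // then the per-operand groups starting at Op_FirstOperand.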
7899   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
7900   std::vector<SDValue> AsmNodeOperands;
7901   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
7902   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
7903       IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));
7904 
7905   // If we have a !srcloc metadata node associated with it, we want to attach
7906   // this to the ultimately generated inline asm machineinstr.  To do this, we
7907   // pass this (potentially null) inline asm MDNode in as the third operand.
7908   const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
7909   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
7910 
7911   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
7912   // bits as operand 3.
7913   AsmNodeOperands.push_back(DAG.getTargetConstant(
7914       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7915 
7916   // Third pass: Loop over operands to prepare DAG-level operands.  As part of
7917   // this, assign virtual and physical registers for inputs and outputs.
7918   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
7919     // Assign Registers.
7920     SDISelAsmOperandInfo &RefOpInfo =
7921         OpInfo.isMatchingInputConstraint()
7922             ? ConstraintOperands[OpInfo.getMatchedOperand()]
7923             : OpInfo;
7924     GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
7925 
7926     switch (OpInfo.Type) {
7927     case InlineAsm::isOutput:
7928       if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
7929           (OpInfo.ConstraintType == TargetLowering::C_Other &&
7930            OpInfo.isIndirect)) {
7931         unsigned ConstraintID =
7932             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
7933         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
7934                "Failed to convert memory constraint code to constraint id.");
7935 
7936         // Add information to the INLINEASM node to know about this output.
7937         unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
7938         OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
7939         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
7940                                                         MVT::i32));
7941         AsmNodeOperands.push_back(OpInfo.CallOperand);
7942         break;
7943       } else if ((OpInfo.ConstraintType == TargetLowering::C_Other &&
7944                   !OpInfo.isIndirect) ||
7945                  OpInfo.ConstraintType == TargetLowering::C_Register ||
7946                  OpInfo.ConstraintType == TargetLowering::C_RegisterClass) {
7947         // Otherwise, this outputs to a register (directly for C_Register /
7948         // C_RegisterClass, and a target-defined fashion for C_Other). Find a
7949         // register that we can use.
7950         if (OpInfo.AssignedRegs.Regs.empty()) {
7951           emitInlineAsmError(
7952               CS, "couldn't allocate output register for constraint '" +
7953                       Twine(OpInfo.ConstraintCode) + "'");
7954           return;
7955         }
7956 
7957         // Add information to the INLINEASM node to know that this register is
7958         // set.
7959         OpInfo.AssignedRegs.AddInlineAsmOperands(
7960             OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
7961                                   : InlineAsm::Kind_RegDef,
7962             false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
7963       }
7964       break;
7965 
7966     case InlineAsm::isInput: {
7967       SDValue InOperandVal = OpInfo.CallOperand;
7968 
7969       if (OpInfo.isMatchingInputConstraint()) {
7970         // If this is required to match an output register we have already set,
7971         // just use its register.
7972         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
7973                                                   AsmNodeOperands);
7974         unsigned OpFlag =
7975           cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7976         if (InlineAsm::isRegDefKind(OpFlag) ||
7977             InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
7978           // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
7979           if (OpInfo.isIndirect) {
7980             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
7981             emitInlineAsmError(CS, "inline asm not supported yet:"
7982                                    " don't know how to handle tied "
7983                                    "indirect register inputs");
7984             return;
7985           }
7986 
7987           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
7988           SmallVector<unsigned, 4> Regs;
7989 
7990           if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
7991             unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
7992             MachineRegisterInfo &RegInfo =
7993                 DAG.getMachineFunction().getRegInfo();
7994             for (unsigned i = 0; i != NumRegs; ++i)
7995               Regs.push_back(RegInfo.createVirtualRegister(RC));
7996           } else {
7997             emitInlineAsmError(CS, "inline asm error: This value type register "
7998                                    "class is not natively supported!");
7999             return;
8000           }
8001 
8002           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
8003 
8004           SDLoc dl = getCurSDLoc();
8005           // Use MatchedRegs to copy the input value into the registers.
8006           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
8007                                     CS.getInstruction());
8008           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
8009                                            true, OpInfo.getMatchedOperand(), dl,
8010                                            DAG, AsmNodeOperands);
8011           break;
8012         }
8013 
8014         assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
8015         assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
8016                "Unexpected number of operands");
8017         // Add information to the INLINEASM node to know about this input.
8018         // See InlineAsm.h isUseOperandTiedToDef.
8019         OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
8020         OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
8021                                                     OpInfo.getMatchedOperand());
8022         AsmNodeOperands.push_back(DAG.getTargetConstant(
8023             OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8024         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
8025         break;
8026       }
8027 
8028       // Treat indirect 'X' constraint as memory.
8029       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
8030           OpInfo.isIndirect)
8031         OpInfo.ConstraintType = TargetLowering::C_Memory;
8032 
8033       if (OpInfo.ConstraintType == TargetLowering::C_Other) {
8034         std::vector<SDValue> Ops;
8035         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
8036                                           Ops, DAG);
8037         if (Ops.empty()) {
8038           emitInlineAsmError(CS, "invalid operand for inline asm constraint '" +
8039                                      Twine(OpInfo.ConstraintCode) + "'");
8040           return;
8041         }
8042 
8043         // Add information to the INLINEASM node to know about this input.
8044         unsigned ResOpType =
8045           InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
8046         AsmNodeOperands.push_back(DAG.getTargetConstant(
8047             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8048         AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
8049         break;
8050       }
8051 
8052       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8053         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
8054         assert(InOperandVal.getValueType() ==
8055                    TLI.getPointerTy(DAG.getDataLayout()) &&
8056                "Memory operands expect pointer values");
8057 
8058         unsigned ConstraintID =
8059             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8060         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8061                "Failed to convert memory constraint code to constraint id.");
8062 
8063         // Add information to the INLINEASM node to know about this input.
8064         unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8065         ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
8066         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
8067                                                         getCurSDLoc(),
8068                                                         MVT::i32));
8069         AsmNodeOperands.push_back(InOperandVal);
8070         break;
8071       }
8072 
8073       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
8074               OpInfo.ConstraintType == TargetLowering::C_Register) &&
8075              "Unknown constraint type!");
8076 
8077       // TODO: Support this.
8078       if (OpInfo.isIndirect) {
8079         emitInlineAsmError(
8080             CS, "Don't know how to handle indirect register inputs yet "
8081                 "for constraint '" +
8082                     Twine(OpInfo.ConstraintCode) + "'");
8083         return;
8084       }
8085 
8086       // Copy the input into the appropriate registers.
8087       if (OpInfo.AssignedRegs.Regs.empty()) {
8088         emitInlineAsmError(CS, "couldn't allocate input reg for constraint '" +
8089                                    Twine(OpInfo.ConstraintCode) + "'");
8090         return;
8091       }
8092 
8093       SDLoc dl = getCurSDLoc();
8094 
8095       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl,
8096                                         Chain, &Flag, CS.getInstruction());
8097 
8098       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
8099                                                dl, DAG, AsmNodeOperands);
8100       break;
8101     }
8102     case InlineAsm::isClobber:
8103       // Add the clobbered value to the operand list, so that the register
8104       // allocator is aware that the physreg got clobbered.
8105       if (!OpInfo.AssignedRegs.Regs.empty())
8106         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
8107                                                  false, 0, getCurSDLoc(), DAG,
8108                                                  AsmNodeOperands);
8109       break;
8110     }
8111   }
8112 
8113   // Finish up input operands.  Set the input chain and add the flag last.
8114   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
8115   if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
8116 
8117   unsigned ISDOpc = isa<CallBrInst>(CS.getInstruction()) ? ISD::INLINEASM_BR : ISD::INLINEASM;
8118   Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
8119                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
8120   Flag = Chain.getValue(1);
8121 
8122   // Do additional work to generate outputs.
8123 
8124   SmallVector<EVT, 1> ResultVTs;
8125   SmallVector<SDValue, 1> ResultValues;
8126   SmallVector<SDValue, 8> OutChains;
8127 
8128   llvm::Type *CSResultType = CS.getType();
8129   ArrayRef<Type *> ResultTypes;
8130   if (StructType *StructResult = dyn_cast<StructType>(CSResultType))
8131     ResultTypes = StructResult->elements();
8132   else if (!CSResultType->isVoidTy())
8133     ResultTypes = makeArrayRef(CSResultType);
8134 
8135   auto CurResultType = ResultTypes.begin();
8136   auto handleRegAssign = [&](SDValue V) {
8137     assert(CurResultType != ResultTypes.end() && "Unexpected value");
8138     assert((*CurResultType)->isSized() && "Unexpected unsized type");
8139     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
8140     ++CurResultType;
8141     // If the type of the inline asm call site return value differs from the
8142     // asm output's type but has the same size, bitcast it.  One example of this
8143     // is for vectors with different width / number of elements.  This can
8144     // happen for register classes that can contain multiple different value
8145     // types.  The preg or vreg allocated may not have the same VT as was
8146     // expected.
8147     //
8148     // This can also happen for a return value that disagrees with the register
8149     // class it is put in, eg. a double in a general-purpose register on a
8150     // 32-bit machine.
8151     if (ResultVT != V.getValueType() &&
8152         ResultVT.getSizeInBits() == V.getValueSizeInBits())
8153       V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
8154     else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
8155              V.getValueType().isInteger()) {
8156       // If a result value was tied to an input value, the computed result
8157       // may have a wider width than the expected result.  Extract the
8158       // relevant portion.
8159       V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
8160     }
8161     assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
8162     ResultVTs.push_back(ResultVT);
8163     ResultValues.push_back(V);
8164   };
8165 
8166   // Deal with output operands.
8167   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8168     if (OpInfo.Type == InlineAsm::isOutput) {
8169       SDValue Val;
8170       // Skip trivial output operands.
8171       if (OpInfo.AssignedRegs.Regs.empty())
8172         continue;
8173 
8174       switch (OpInfo.ConstraintType) {
8175       case TargetLowering::C_Register:
8176       case TargetLowering::C_RegisterClass:
8177         Val = OpInfo.AssignedRegs.getCopyFromRegs(
8178             DAG, FuncInfo, getCurSDLoc(), Chain, &Flag, CS.getInstruction());
8179         break;
8180       case TargetLowering::C_Other:
8181         Val = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
8182                                               OpInfo, DAG);
8183         break;
8184       case TargetLowering::C_Memory:
8185         break; // Already handled.
8186       case TargetLowering::C_Unknown:
8187         assert(false && "Unexpected unknown constraint");
8188       }
8189 
8190       // Indirect outputs manifest as stores.  Record the output chains.
8191       if (OpInfo.isIndirect) {
8192         const Value *Ptr = OpInfo.CallOperandVal;
8193         assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
8194         SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
8195                                      MachinePointerInfo(Ptr));
8196         OutChains.push_back(Store);
8197       } else {
8198         // Record the value(s) copied out of the assigned registers.
8199         assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
8200         if (Val.getOpcode() == ISD::MERGE_VALUES) {
8201           for (const SDValue &V : Val->op_values())
8202             handleRegAssign(V);
8203         } else
8204           handleRegAssign(Val);
8205       }
8206     }
8207   }
8208 
8209   // Set results.
8210   if (!ResultValues.empty()) {
8211     assert(CurResultType == ResultTypes.end() &&
8212            "Mismatch in number of ResultTypes");
8213     assert(ResultValues.size() == ResultTypes.size() &&
8214            "Mismatch in number of output operands in asm result");
8215 
8216     SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
8217                             DAG.getVTList(ResultVTs), ResultValues);
8218     setValue(CS.getInstruction(), V);
8219   }
8220 
8221   // Collect store chains.
8222   if (!OutChains.empty())
8223     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
8224 
8225   // Only update the root if the inline assembly has a memory effect.
8226   if (ResultValues.empty() || HasSideEffect || !OutChains.empty())
8227     DAG.setRoot(Chain);
8228 }
8229 
8230 void SelectionDAGBuilder::emitInlineAsmError(ImmutableCallSite CS,
8231                                              const Twine &Message) {
8232   LLVMContext &Ctx = *DAG.getContext();
8233   Ctx.emitError(CS.getInstruction(), Message);
8234 
8235   // Make sure we leave the DAG in a valid state.
8236   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8237   SmallVector<EVT, 1> ValueVTs;
8238   ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
8239 
8240   if (ValueVTs.empty())
8241     return;
8242 
8243   SmallVector<SDValue, 1> Ops;
8244   for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
8245     Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
8246 
8247   setValue(CS.getInstruction(), DAG.getMergeValues(Ops, getCurSDLoc()));
8248 }
8249 
8250 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
8251   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
8252                           MVT::Other, getRoot(),
8253                           getValue(I.getArgOperand(0)),
8254                           DAG.getSrcValue(I.getArgOperand(0))));
8255 }
8256 
8257 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
8258   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8259   const DataLayout &DL = DAG.getDataLayout();
8260   SDValue V = DAG.getVAArg(TLI.getValueType(DAG.getDataLayout(), I.getType()),
8261                            getCurSDLoc(), getRoot(), getValue(I.getOperand(0)),
8262                            DAG.getSrcValue(I.getOperand(0)),
8263                            DL.getABITypeAlignment(I.getType()));
8264   setValue(&I, V);
8265   DAG.setRoot(V.getValue(1));
8266 }
8267 
8268 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
8269   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
8270                           MVT::Other, getRoot(),
8271                           getValue(I.getArgOperand(0)),
8272                           DAG.getSrcValue(I.getArgOperand(0))));
8273 }
8274 
8275 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
8276   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
8277                           MVT::Other, getRoot(),
8278                           getValue(I.getArgOperand(0)),
8279                           getValue(I.getArgOperand(1)),
8280                           DAG.getSrcValue(I.getArgOperand(0)),
8281                           DAG.getSrcValue(I.getArgOperand(1))));
8282 }
8283 
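/// If \p I carries !range metadata whose lower bound is zero, wrap \p Op in
/// an AssertZext of the narrowest integer VT covering the range.  For
/// example, !range !{i32 0, i32 256} (values 0..255) asserts that only the
/// low 8 bits (i8) can be nonzero.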
8284 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
8285                                                     const Instruction &I,
8286                                                     SDValue Op) {
8287   const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
8288   if (!Range)
8289     return Op;
8290 
8291   ConstantRange CR = getConstantRangeFromMetadata(*Range);
8292   if (CR.isFullSet() || CR.isEmptySet() || CR.isWrappedSet())
8293     return Op;
8294 
8295   APInt Lo = CR.getUnsignedMin();
8296   if (!Lo.isMinValue())
8297     return Op;
8298 
8299   APInt Hi = CR.getUnsignedMax();
8300   unsigned Bits = std::max(Hi.getActiveBits(),
8301                            static_cast<unsigned>(IntegerType::MIN_INT_BITS));
8302 
8303   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
8304 
8305   SDLoc SL = getCurSDLoc();
8306 
8307   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
8308                              DAG.getValueType(SmallVT));
8309   unsigned NumVals = Op.getNode()->getNumValues();
8310   if (NumVals == 1)
8311     return ZExt;
8312 
8313   SmallVector<SDValue, 4> Ops;
8314 
8315   Ops.push_back(ZExt);
8316   for (unsigned I = 1; I != NumVals; ++I)
8317     Ops.push_back(Op.getValue(I));
8318 
8319   return DAG.getMergeValues(Ops, SL);
8320 }
8321 
8322 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
8323 /// the call being lowered.
8324 ///
8325 /// This is a helper for lowering intrinsics that follow a target calling
8326 /// convention or require stack pointer adjustment. Only a subset of the
8327 /// intrinsic's operands need to participate in the calling convention.
8328 void SelectionDAGBuilder::populateCallLoweringInfo(
8329     TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
8330     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
8331     bool IsPatchPoint) {
8332   TargetLowering::ArgListTy Args;
8333   Args.reserve(NumArgs);
8334 
8335   // Populate the argument list.
8336   // Attributes for args start at offset 1, after the return attribute.
8337   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
8338        ArgI != ArgE; ++ArgI) {
8339     const Value *V = Call->getOperand(ArgI);
8340 
8341     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
8342 
8343     TargetLowering::ArgListEntry Entry;
8344     Entry.Node = getValue(V);
8345     Entry.Ty = V->getType();
8346     Entry.setAttributes(Call, ArgI);
8347     Args.push_back(Entry);
8348   }
8349 
8350   CLI.setDebugLoc(getCurSDLoc())
8351       .setChain(getRoot())
8352       .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args))
8353       .setDiscardResult(Call->use_empty())
8354       .setIsPatchPoint(IsPatchPoint);
8355 }
8356 
8357 /// Add a stack map intrinsic call's live variable operands to a stackmap
8358 /// or patchpoint target node's operand list.
8359 ///
8360 /// Constants are converted to TargetConstants purely as an optimization to
8361 /// avoid constant materialization and register allocation.
8362 ///
8363 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
8364 /// generate address computation nodes, and so ExpandISelPseudo can convert the
8365 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
8366 /// address materialization and register allocation, but may also be required
8367 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
8368 /// alloca in the entry block, then the runtime may assume that the alloca's
8369 /// StackMap location can be read immediately after compilation and that the
8370 /// location is valid at any point during execution (this is similar to the
8371 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
8372 /// only available in a register, then the runtime would need to trap when
8373 /// execution reaches the StackMap in order to read the alloca's location.
8374 static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
8375                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
8376                                 SelectionDAGBuilder &Builder) {
8377   for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
8378     SDValue OpVal = Builder.getValue(CS.getArgument(i));
8379     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
8380       Ops.push_back(
8381         Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
8382       Ops.push_back(
8383         Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
8384     } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
8385       const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
8386       Ops.push_back(Builder.DAG.getTargetFrameIndex(
8387           FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
8388     } else
8389       Ops.push_back(OpVal);
8390   }
8391 }
8392 
8393 /// Lower llvm.experimental.stackmap directly to its target opcode.
8394 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
8395   // void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
8396   //                                  [live variables...])
8397 
8398   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
8399 
8400   SDValue Chain, InFlag, Callee, NullPtr;
8401   SmallVector<SDValue, 32> Ops;
8402 
8403   SDLoc DL = getCurSDLoc();
8404   Callee = getValue(CI.getCalledValue());
8405   NullPtr = DAG.getIntPtrConstant(0, DL, true);
8406 
8407   // The stackmap intrinsic only records the live variables (the arguments
8408   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
8409   // intrinsic, this won't be lowered to a function call. This means we don't
8410   // have to worry about calling conventions and target specific lowering code.
8411   // Instead we perform the call lowering right here.
8412   //
8413   // chain, flag = CALLSEQ_START(chain, 0, 0)
8414   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
8415   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
8416   //
8417   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
8418   InFlag = Chain.getValue(1);
8419 
8420   // Add the <id> and <numBytes> constants.
8421   SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
8422   Ops.push_back(DAG.getTargetConstant(
8423                   cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
8424   SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
8425   Ops.push_back(DAG.getTargetConstant(
8426                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
8427                   MVT::i32));
8428 
8429   // Push live variables for the stack map.
8430   addStackMapLiveVars(&CI, 2, DL, Ops, *this);
8431 
8432   // We are not pushing any register mask info here on the operands list,
8433   // because the stackmap doesn't clobber anything.
8434 
8435   // Push the chain and the glue flag.
8436   Ops.push_back(Chain);
8437   Ops.push_back(InFlag);
8438 
8439   // Create the STACKMAP node.
8440   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8441   SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
8442   Chain = SDValue(SM, 0);
8443   InFlag = Chain.getValue(1);
8444 
8445   Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
8446 
8447   // Stackmaps don't generate values, so nothing goes into the NodeMap.
8448 
8449   // Set the root to the target-lowered call chain.
8450   DAG.setRoot(Chain);
8451 
8452   // Inform the Frame Information that we have a stackmap in this function.
8453   FuncInfo.MF->getFrameInfo().setHasStackMap();
8454 }
8455 
8456 /// Lower llvm.experimental.patchpoint directly to its target opcode.
8457 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
8458                                           const BasicBlock *EHPadBB) {
8459   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
8460   //                                                 i32 <numBytes>,
8461   //                                                 i8* <target>,
8462   //                                                 i32 <numArgs>,
8463   //                                                 [Args...],
8464   //                                                 [live variables...])
8465 
8466   CallingConv::ID CC = CS.getCallingConv();
8467   bool IsAnyRegCC = CC == CallingConv::AnyReg;
8468   bool HasDef = !CS->getType()->isVoidTy();
8469   SDLoc dl = getCurSDLoc();
8470   SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));
8471 
8472   // Handle immediate and symbolic callees.
8473   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
8474     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
8475                                    /*isTarget=*/true);
8476   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
8477     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
8478                                          SDLoc(SymbolicCallee),
8479                                          SymbolicCallee->getValueType(0));
8480 
8481   // Get the real number of arguments participating in the call <numArgs>
8482   SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
8483   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
8484 
8485   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
8486   // Intrinsics include all meta-operands up to but not including CC.
8487   unsigned NumMetaOpers = PatchPointOpers::CCPos;
8488   assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
8489          "Not enough arguments provided to the patchpoint intrinsic");
8490 
8491   // For AnyRegCC the arguments are lowered later on manually.
8492   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
8493   Type *ReturnTy =
8494     IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType();
8495 
8496   TargetLowering::CallLoweringInfo CLI(DAG);
8497   populateCallLoweringInfo(CLI, cast<CallBase>(CS.getInstruction()),
8498                            NumMetaOpers, NumCallArgs, Callee, ReturnTy, true);
8499   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8500 
8501   SDNode *CallEnd = Result.second.getNode();
8502   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
8503     CallEnd = CallEnd->getOperand(0).getNode();
8504 
8505   // Get a call instruction from the call sequence chain.
8506   // Tail calls are not allowed.
8507   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
8508          "Expected a callseq node.");
8509   SDNode *Call = CallEnd->getOperand(0).getNode();
8510   bool HasGlue = Call->getGluedNode();
8511 
8512   // Replace the target specific call node with the patchable intrinsic.
8513   SmallVector<SDValue, 8> Ops;
8514 
8515   // Add the <id> and <numBytes> constants.
8516   SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
8517   Ops.push_back(DAG.getTargetConstant(
8518                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
8519   SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
8520   Ops.push_back(DAG.getTargetConstant(
8521                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
8522                   MVT::i32));
8523 
8524   // Add the callee.
8525   Ops.push_back(Callee);
8526 
8527   // Adjust <numArgs> to account for any arguments that have been passed on the
8528   // stack instead.
8529   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
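  // That is, subtract the chain, target, and register mask operands (plus the
  // glue operand, if present) from the call's operand count.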
8530   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
8531   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
8532   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
8533 
8534   // Add the calling convention
8535   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
8536 
8537   // Add the arguments we omitted previously. The register allocator should
8538   // place these in any free register.
8539   if (IsAnyRegCC)
8540     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
8541       Ops.push_back(getValue(CS.getArgument(i)));
8542 
8543   // Push the arguments from the call instruction up to the register mask.
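  // (Call->op_begin() + 2 skips the call's chain and target operands.)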
8544   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
8545   Ops.append(Call->op_begin() + 2, e);
8546 
8547   // Push live variables for the stack map.
8548   addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);
8549 
8550   // Push the register mask info.
8551   if (HasGlue)
8552     Ops.push_back(*(Call->op_end()-2));
8553   else
8554     Ops.push_back(*(Call->op_end()-1));
8555 
8556   // Push the chain (this is originally the first operand of the call, but
8557   // now becomes the last or second-to-last operand).
8558   Ops.push_back(*(Call->op_begin()));
8559 
8560   // Push the glue flag (last operand).
8561   if (HasGlue)
8562     Ops.push_back(*(Call->op_end()-1));
8563 
8564   SDVTList NodeTys;
8565   if (IsAnyRegCC && HasDef) {
8566     // Create the return types based on the intrinsic definition.
8567     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8568     SmallVector<EVT, 3> ValueVTs;
8569     ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
8570     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
8571 
8572     // There is always a chain and a glue type at the end.
8573     ValueVTs.push_back(MVT::Other);
8574     ValueVTs.push_back(MVT::Glue);
8575     NodeTys = DAG.getVTList(ValueVTs);
8576   } else
8577     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8578 
8579   // Replace the target specific call node with a PATCHPOINT node.
8580   MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
8581                                          dl, NodeTys, Ops);
8582 
8583   // Update the NodeMap.
8584   if (HasDef) {
8585     if (IsAnyRegCC)
8586       setValue(CS.getInstruction(), SDValue(MN, 0));
8587     else
8588       setValue(CS.getInstruction(), Result.first);
8589   }
8590 
8591   // Fix up the consumers of the intrinsic. The chain and glue may be used in the
8592   // call sequence. Furthermore the location of the chain and glue can change
8593   // when the AnyReg calling convention is used and the intrinsic returns a
8594   // value.
8595   if (IsAnyRegCC && HasDef) {
8596     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
8597     SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
8598     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
8599   } else
8600     DAG.ReplaceAllUsesWith(Call, MN);
8601   DAG.DeleteNode(Call);
8602 
8603   // Inform the Frame Information that we have a patchpoint in this function.
8604   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
8605 }
8606 
8607 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
8608                                             unsigned Intrinsic) {
8609   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8610   SDValue Op1 = getValue(I.getArgOperand(0));
8611   SDValue Op2;
8612   if (I.getNumArgOperands() > 1)
8613     Op2 = getValue(I.getArgOperand(1));
8614   SDLoc dl = getCurSDLoc();
8615   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8616   SDValue Res;
8617   FastMathFlags FMF;
8618   if (isa<FPMathOperator>(I))
8619     FMF = I.getFastMathFlags();
8620 
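  // Note: for the strict (ordered) forms of fadd/fmul below, Op1 is the
  // scalar start value and Op2 is the vector to reduce; the fast-math forms
  // ignore the start value and reduce only the vector operand.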
8621   switch (Intrinsic) {
8622   case Intrinsic::experimental_vector_reduce_fadd:
8623     if (FMF.isFast())
8624       Res = DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2);
8625     else
8626       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
8627     break;
8628   case Intrinsic::experimental_vector_reduce_fmul:
8629     if (FMF.isFast())
8630       Res = DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2);
8631     else
8632       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
8633     break;
8634   case Intrinsic::experimental_vector_reduce_add:
8635     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
8636     break;
8637   case Intrinsic::experimental_vector_reduce_mul:
8638     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
8639     break;
8640   case Intrinsic::experimental_vector_reduce_and:
8641     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
8642     break;
8643   case Intrinsic::experimental_vector_reduce_or:
8644     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
8645     break;
8646   case Intrinsic::experimental_vector_reduce_xor:
8647     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
8648     break;
8649   case Intrinsic::experimental_vector_reduce_smax:
8650     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
8651     break;
8652   case Intrinsic::experimental_vector_reduce_smin:
8653     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
8654     break;
8655   case Intrinsic::experimental_vector_reduce_umax:
8656     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
8657     break;
8658   case Intrinsic::experimental_vector_reduce_umin:
8659     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
8660     break;
8661   case Intrinsic::experimental_vector_reduce_fmax:
8662     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1);
8663     break;
8664   case Intrinsic::experimental_vector_reduce_fmin:
8665     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1);
8666     break;
8667   default:
8668     llvm_unreachable("Unhandled vector reduce intrinsic");
8669   }
8670   setValue(&I, Res);
8671 }
8672 
8673 /// Returns an AttributeList representing the attributes applied to the return
8674 /// value of the given call.
8675 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
8676   SmallVector<Attribute::AttrKind, 2> Attrs;
8677   if (CLI.RetSExt)
8678     Attrs.push_back(Attribute::SExt);
8679   if (CLI.RetZExt)
8680     Attrs.push_back(Attribute::ZExt);
8681   if (CLI.IsInReg)
8682     Attrs.push_back(Attribute::InReg);
8683 
8684   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
8685                             Attrs);
8686 }
8687 
8688 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
8689 /// implementation, which just calls LowerCall.
8690 /// FIXME: When all targets are
8691 /// migrated to using LowerCall, this hook should be integrated into SDISel.
8692 std::pair<SDValue, SDValue>
8693 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
8694   // Handle the incoming return values from the call.
8695   CLI.Ins.clear();
8696   Type *OrigRetTy = CLI.RetTy;
8697   SmallVector<EVT, 4> RetTys;
8698   SmallVector<uint64_t, 4> Offsets;
8699   auto &DL = CLI.DAG.getDataLayout();
8700   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
8701 
8702   if (CLI.IsPostTypeLegalization) {
8703     // If we are lowering a libcall after legalization, split the return type.
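    // E.g., assuming a 64-bit target where i64 is the widest legal integer,
    // an i128 return value is split into two i64 registers at offsets 0 and 8.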
8704     SmallVector<EVT, 4> OldRetTys = std::move(RetTys);
8705     SmallVector<uint64_t, 4> OldOffsets = std::move(Offsets);
8706     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
8707       EVT RetVT = OldRetTys[i];
8708       uint64_t Offset = OldOffsets[i];
8709       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
8710       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
8711       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
8712       RetTys.append(NumRegs, RegisterVT);
8713       for (unsigned j = 0; j != NumRegs; ++j)
8714         Offsets.push_back(Offset + j * RegisterVTByteSZ);
8715     }
8716   }
8717 
8718   SmallVector<ISD::OutputArg, 4> Outs;
8719   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
8720 
8721   bool CanLowerReturn =
8722       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
8723                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
8724 
8725   SDValue DemoteStackSlot;
8726   int DemoteStackIdx = -100;
8727   if (!CanLowerReturn) {
8728     // FIXME: equivalent assert?
8729     // assert(!CS.hasInAllocaArgument() &&
8730     //        "sret demotion is incompatible with inalloca");
8731     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
8732     unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
8733     MachineFunction &MF = CLI.DAG.getMachineFunction();
8734     DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
8735     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
8736                                               DL.getAllocaAddrSpace());
8737 
8738     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
8739     ArgListEntry Entry;
8740     Entry.Node = DemoteStackSlot;
8741     Entry.Ty = StackSlotPtrType;
8742     Entry.IsSExt = false;
8743     Entry.IsZExt = false;
8744     Entry.IsInReg = false;
8745     Entry.IsSRet = true;
8746     Entry.IsNest = false;
8747     Entry.IsByVal = false;
8748     Entry.IsReturned = false;
8749     Entry.IsSwiftSelf = false;
8750     Entry.IsSwiftError = false;
8751     Entry.Alignment = Align;
8752     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
8753     CLI.NumFixedArgs += 1;
8754     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
8755 
8756     // sret demotion isn't compatible with tail-calls, since the sret argument
8757     // points into the callers stack frame.
8758     CLI.IsTailCall = false;
8759   } else {
8760     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
8761       EVT VT = RetTys[I];
8762       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
8763                                                      CLI.CallConv, VT);
8764       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
8765                                                        CLI.CallConv, VT);
8766       for (unsigned i = 0; i != NumRegs; ++i) {
8767         ISD::InputArg MyFlags;
8768         MyFlags.VT = RegisterVT;
8769         MyFlags.ArgVT = VT;
8770         MyFlags.Used = CLI.IsReturnValueUsed;
8771         if (CLI.RetSExt)
8772           MyFlags.Flags.setSExt();
8773         if (CLI.RetZExt)
8774           MyFlags.Flags.setZExt();
8775         if (CLI.IsInReg)
8776           MyFlags.Flags.setInReg();
8777         CLI.Ins.push_back(MyFlags);
8778       }
8779     }
8780   }
8781 
8782   // We push in swifterror return as the last element of CLI.Ins.
8783   ArgListTy &Args = CLI.getArgs();
8784   if (supportSwiftError()) {
8785     for (unsigned i = 0, e = Args.size(); i != e; ++i) {
8786       if (Args[i].IsSwiftError) {
8787         ISD::InputArg MyFlags;
8788         MyFlags.VT = getPointerTy(DL);
8789         MyFlags.ArgVT = EVT(getPointerTy(DL));
8790         MyFlags.Flags.setSwiftError();
8791         CLI.Ins.push_back(MyFlags);
8792       }
8793     }
8794   }
8795 
8796   // Handle all of the outgoing arguments.
8797   CLI.Outs.clear();
8798   CLI.OutVals.clear();
8799   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
8800     SmallVector<EVT, 4> ValueVTs;
8801     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
8802     // FIXME: Split arguments if CLI.IsPostTypeLegalization
8803     Type *FinalType = Args[i].Ty;
8804     if (Args[i].IsByVal)
8805       FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
8806     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
8807         FinalType, CLI.CallConv, CLI.IsVarArg);
8808     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
8809          ++Value) {
8810       EVT VT = ValueVTs[Value];
8811       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
8812       SDValue Op = SDValue(Args[i].Node.getNode(),
8813                            Args[i].Node.getResNo() + Value);
8814       ISD::ArgFlagsTy Flags;
8815 
8816       // Certain targets (such as MIPS) may have a different ABI alignment
8817       // for a type depending on the context. Give the target a chance to
8818       // specify the alignment it wants.
8819       unsigned OriginalAlignment = getABIAlignmentForCallingConv(ArgTy, DL);
8820 
8821       if (Args[i].IsZExt)
8822         Flags.setZExt();
8823       if (Args[i].IsSExt)
8824         Flags.setSExt();
8825       if (Args[i].IsInReg) {
8826         // If we are using the vectorcall calling convention, a structure that
8827         // is passed InReg is surely an HVA (Homogeneous Vector Aggregate).
8828         if (CLI.CallConv == CallingConv::X86_VectorCall &&
8829             isa<StructType>(FinalType)) {
8830           // The first value of a structure is marked as HvaStart.
8831           if (0 == Value)
8832             Flags.setHvaStart();
8833           Flags.setHva();
8834         }
8835         // Set InReg Flag
8836         Flags.setInReg();
8837       }
8838       if (Args[i].IsSRet)
8839         Flags.setSRet();
8840       if (Args[i].IsSwiftSelf)
8841         Flags.setSwiftSelf();
8842       if (Args[i].IsSwiftError)
8843         Flags.setSwiftError();
8844       if (Args[i].IsByVal)
8845         Flags.setByVal();
8846       if (Args[i].IsInAlloca) {
8847         Flags.setInAlloca();
8848         // Set the byval flag for CCAssignFn callbacks that don't know about
8849         // inalloca.  This way we can know how many bytes we should've allocated
8850         // and how many bytes a callee cleanup function will pop.  If we port
8851         // inalloca to more targets, we'll have to add custom inalloca handling
8852         // in the various CC lowering callbacks.
8853         Flags.setByVal();
8854       }
8855       if (Args[i].IsByVal || Args[i].IsInAlloca) {
8856         PointerType *Ty = cast<PointerType>(Args[i].Ty);
8857         Type *ElementTy = Ty->getElementType();
8858         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
8859         // For ByVal, the alignment should come from the frontend.  The
8860         // backend will guess if this info is missing, but can get it wrong.
8861         unsigned FrameAlign;
8862         if (Args[i].Alignment)
8863           FrameAlign = Args[i].Alignment;
8864         else
8865           FrameAlign = getByValTypeAlignment(ElementTy, DL);
8866         Flags.setByValAlign(FrameAlign);
8867       }
8868       if (Args[i].IsNest)
8869         Flags.setNest();
8870       if (NeedsRegBlock)
8871         Flags.setInConsecutiveRegs();
8872       Flags.setOrigAlign(OriginalAlignment);
8873 
8874       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
8875                                                  CLI.CallConv, VT);
8876       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
8877                                                         CLI.CallConv, VT);
8878       SmallVector<SDValue, 4> Parts(NumParts);
8879       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
8880 
8881       if (Args[i].IsSExt)
8882         ExtendKind = ISD::SIGN_EXTEND;
8883       else if (Args[i].IsZExt)
8884         ExtendKind = ISD::ZERO_EXTEND;
8885 
8886       // For now, conservatively only handle 'returned' on non-vector types
8887       // that can be lowered.
8888       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
8889           CanLowerReturn) {
8890         assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
8891                "unexpected use of 'returned'");
8892         // Before passing 'returned' to the target lowering code, ensure that
8893         // either the register MVT and the actual EVT are the same size or that
8894         // the return value and argument are extended in the same way; in these
8895         // cases it's safe to pass the argument register value unchanged as the
8896         // return register value (although it's at the target's option whether
8897         // to do so)
8898         // TODO: allow code generation to take advantage of partially preserved
8899         // registers rather than clobbering the entire register when the
8900         // parameter extension method is not compatible with the return
8901         // extension method
8902         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
8903             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
8904              CLI.RetZExt == Args[i].IsZExt))
8905           Flags.setReturned();
8906       }
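           // ('returned' is typically used for routines in the style of memset,
           // which return their destination-pointer argument; the flag lets
           // targets reuse the incoming argument register for the return value.)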
8907 
8908       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
8909                      CLI.CS.getInstruction(), CLI.CallConv, ExtendKind);
8910 
8911       for (unsigned j = 0; j != NumParts; ++j) {
8912         // If this isn't the first piece, the alignment must be 1.
8913         ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
8914                                i < CLI.NumFixedArgs,
8915                                i, j*Parts[j].getValueType().getStoreSize());
8916         if (NumParts > 1 && j == 0)
8917           MyFlags.Flags.setSplit();
8918         else if (j != 0) {
8919           MyFlags.Flags.setOrigAlign(1);
8920           if (j == NumParts - 1)
8921             MyFlags.Flags.setSplitEnd();
8922         }
8923 
8924         CLI.Outs.push_back(MyFlags);
8925         CLI.OutVals.push_back(Parts[j]);
8926       }
8927 
8928       if (NeedsRegBlock && Value == NumValues - 1)
8929         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
8930     }
8931   }
8932 
8933   SmallVector<SDValue, 4> InVals;
8934   CLI.Chain = LowerCall(CLI, InVals);
8935 
8936   // Update CLI.InVals for use outside of this function.
8937   CLI.InVals = InVals;
8938 
8939   // Verify that the target's LowerCall behaved as expected.
8940   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
8941          "LowerCall didn't return a valid chain!");
8942   assert((!CLI.IsTailCall || InVals.empty()) &&
8943          "LowerCall emitted a return value for a tail call!");
8944   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
8945          "LowerCall didn't emit the correct number of values!");
8946 
8947   // For a tail call, the return value is merely live-out and there aren't
8948   // any nodes in the DAG representing it. Return a special value to
8949   // indicate that a tail call has been emitted and no more Instructions
8950   // should be processed in the current block.
8951   if (CLI.IsTailCall) {
8952     CLI.DAG.setRoot(CLI.Chain);
8953     return std::make_pair(SDValue(), SDValue());
8954   }
8955 
8956 #ifndef NDEBUG
8957   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
8958     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
8959     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
8960            "LowerCall emitted a value with the wrong type!");
8961   }
8962 #endif
8963 
8964   SmallVector<SDValue, 4> ReturnValues;
8965   if (!CanLowerReturn) {
8966     // The instruction result is the result of loading from the
8967     // hidden sret parameter.
8968     SmallVector<EVT, 1> PVTs;
8969     Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());
8970 
8971     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
8972     assert(PVTs.size() == 1 && "Pointers should fit in one register");
8973     EVT PtrVT = PVTs[0];
8974 
8975     unsigned NumValues = RetTys.size();
8976     ReturnValues.resize(NumValues);
8977     SmallVector<SDValue, 4> Chains(NumValues);
8978 
8979     // An aggregate return value cannot wrap around the address space, so
8980     // offsets to its parts don't wrap either.
8981     SDNodeFlags Flags;
8982     Flags.setNoUnsignedWrap(true);
8983 
8984     for (unsigned i = 0; i < NumValues; ++i) {
8985       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
8986                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
8987                                                         PtrVT), Flags);
8988       SDValue L = CLI.DAG.getLoad(
8989           RetTys[i], CLI.DL, CLI.Chain, Add,
8990           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
8991                                             DemoteStackIdx, Offsets[i]),
8992           /* Alignment = */ 1);
8993       ReturnValues[i] = L;
8994       Chains[i] = L.getValue(1);
8995     }
8996 
8997     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
8998   } else {
8999     // Collect the legal value parts into potentially illegal values
9000     // that correspond to the original function's return values.
9001     Optional<ISD::NodeType> AssertOp;
9002     if (CLI.RetSExt)
9003       AssertOp = ISD::AssertSext;
9004     else if (CLI.RetZExt)
9005       AssertOp = ISD::AssertZext;
9006     unsigned CurReg = 0;
9007     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
9008       EVT VT = RetTys[I];
9009       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9010                                                      CLI.CallConv, VT);
9011       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9012                                                        CLI.CallConv, VT);
9013 
9014       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
9015                                               NumRegs, RegisterVT, VT, nullptr,
9016                                               CLI.CallConv, AssertOp));
9017       CurReg += NumRegs;
9018     }
9019 
9020     // For a function returning void, there is no return value. We can't create
9021     // such a node, so we just return a null return value in that case; nothing
9022     // will actually look at the value.
9023     if (ReturnValues.empty())
9024       return std::make_pair(SDValue(), CLI.Chain);
9025   }
9026 
9027   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
9028                                 CLI.DAG.getVTList(RetTys), ReturnValues);
9029   return std::make_pair(Res, CLI.Chain);
9030 }
9031 
9032 void TargetLowering::LowerOperationWrapper(SDNode *N,
9033                                            SmallVectorImpl<SDValue> &Results,
9034                                            SelectionDAG &DAG) const {
9035   if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
9036     Results.push_back(Res);
9037 }
9038 
9039 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9040   llvm_unreachable("LowerOperation not implemented for this target!");
9041 }
9042 
9043 void
9044 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
9045   SDValue Op = getNonRegisterValue(V);
9046   assert((Op.getOpcode() != ISD::CopyFromReg ||
9047           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
9048          "Copy from a reg to the same reg!");
9049   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
9050 
9051   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9052   // If this is an InlineAsm, we have to match the registers required, not
9053   // the notional registers required by the type.
9054 
9055   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
9056                    None); // This is not an ABI copy.
9057   SDValue Chain = DAG.getEntryNode();
9058 
9059   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
9060                               FuncInfo.PreferredExtendType.end())
9061                                  ? ISD::ANY_EXTEND
9062                                  : FuncInfo.PreferredExtendType[V];
9063   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
9064   PendingExports.push_back(Chain);
9065 }
9066 
9067 #include "llvm/CodeGen/SelectionDAGISel.h"
9068 
9069 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
9070 /// entry block, return true.  This includes arguments used by switches, since
9071 /// the switch may expand into multiple basic blocks.
9072 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
9073   // With FastISel active, we may be splitting blocks, so force creation
9074   // of virtual registers for all non-dead arguments.
9075   if (FastISel)
9076     return A->use_empty();
9077 
9078   const BasicBlock &Entry = A->getParent()->front();
9079   for (const User *U : A->users())
9080     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
9081       return false;  // Use not in entry block.
9082 
9083   return true;
9084 }
9085 
9086 using ArgCopyElisionMapTy =
9087     DenseMap<const Argument *,
9088              std::pair<const AllocaInst *, const StoreInst *>>;
9089 
9090 /// Scan the entry block of the function in FuncInfo for arguments that look
9091 /// like copies into a local alloca. Record any copied arguments in
9092 /// ArgCopyElisionCandidates.
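     /// A typical candidate is the pattern clang emits at -O0, e.g.:
     ///   %x.addr = alloca i32
     ///   store i32 %x, i32* %x.addr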
9093 static void
9094 findArgumentCopyElisionCandidates(const DataLayout &DL,
9095                                   FunctionLoweringInfo *FuncInfo,
9096                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
9097   // Record the state of every static alloca used in the entry block. Argument
9098   // allocas are all used in the entry block, so we need approximately as many
9099   // entries as we have arguments.
9100   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
9101   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
9102   unsigned NumArgs = FuncInfo->Fn->arg_size();
9103   StaticAllocas.reserve(NumArgs * 2);
9104 
9105   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
9106     if (!V)
9107       return nullptr;
9108     V = V->stripPointerCasts();
9109     const auto *AI = dyn_cast<AllocaInst>(V);
9110     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
9111       return nullptr;
9112     auto Iter = StaticAllocas.insert({AI, Unknown});
9113     return &Iter.first->second;
9114   };
9115 
9116   // Look for stores of arguments to static allocas. Look through bitcasts and
9117   // GEPs to handle type coercions, as long as the alloca is fully initialized
9118   // by the store. Any non-store use of an alloca escapes it and any subsequent
9119   // unanalyzed store might write it.
9120   // FIXME: Handle structs initialized with multiple stores.
9121   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
9122     // Look for stores, and handle non-store uses conservatively.
9123     const auto *SI = dyn_cast<StoreInst>(&I);
9124     if (!SI) {
9125       // We will look through cast uses, so ignore them completely.
9126       if (I.isCast())
9127         continue;
9128       // Ignore debug info intrinsics, they don't escape or store to allocas.
9129       if (isa<DbgInfoIntrinsic>(I))
9130         continue;
9131       // This is an unknown instruction. Assume it escapes or writes to all
9132       // static alloca operands.
9133       for (const Use &U : I.operands()) {
9134         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
9135           *Info = StaticAllocaInfo::Clobbered;
9136       }
9137       continue;
9138     }
9139 
9140     // If the stored value is a static alloca, mark it as escaped.
9141     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
9142       *Info = StaticAllocaInfo::Clobbered;
9143 
9144     // Check if the destination is a static alloca.
9145     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
9146     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
9147     if (!Info)
9148       continue;
9149     const AllocaInst *AI = cast<AllocaInst>(Dst);
9150 
9151     // Skip allocas that have been initialized or clobbered.
9152     if (*Info != StaticAllocaInfo::Unknown)
9153       continue;
9154 
9155     // Check if the stored value is an argument, and that this store fully
9156     // initializes the alloca. Don't elide copies from the same argument twice.
9157     const Value *Val = SI->getValueOperand()->stripPointerCasts();
9158     const auto *Arg = dyn_cast<Argument>(Val);
9159     if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() ||
9160         Arg->getType()->isEmptyTy() ||
9161         DL.getTypeStoreSize(Arg->getType()) !=
9162             DL.getTypeAllocSize(AI->getAllocatedType()) ||
9163         ArgCopyElisionCandidates.count(Arg)) {
9164       *Info = StaticAllocaInfo::Clobbered;
9165       continue;
9166     }
9167 
9168     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
9169                       << '\n');
9170 
9171     // Mark this alloca and store for argument copy elision.
9172     *Info = StaticAllocaInfo::Elidable;
9173     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
9174 
9175     // Stop scanning if we've seen all arguments. This will happen early in -O0
9176     // builds, which is useful, because -O0 builds have large entry blocks and
9177     // many allocas.
9178     if (ArgCopyElisionCandidates.size() == NumArgs)
9179       break;
9180   }
9181 }
9182 
9183 /// Try to elide argument copies from memory into a local alloca. Succeeds if
9184 /// ArgVal is a load from a suitable fixed stack object.
9185 static void tryToElideArgumentCopy(
9186     FunctionLoweringInfo *FuncInfo, SmallVectorImpl<SDValue> &Chains,
9187     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
9188     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
9189     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
9190     SDValue ArgVal, bool &ArgHasUses) {
9191   // Check if this is a load from a fixed stack object.
9192   auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
9193   if (!LNode)
9194     return;
9195   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
9196   if (!FINode)
9197     return;
9198 
9199   // Check that the fixed stack object is the right size and alignment.
9200   // Look at the alignment that the user wrote on the alloca instead of looking
9201   // at the stack object.
9202   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
9203   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
9204   const AllocaInst *AI = ArgCopyIter->second.first;
9205   int FixedIndex = FINode->getIndex();
9206   int &AllocaIndex = FuncInfo->StaticAllocaMap[AI];
9207   int OldIndex = AllocaIndex;
9208   MachineFrameInfo &MFI = FuncInfo->MF->getFrameInfo();
9209   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
9210     LLVM_DEBUG(
9211         dbgs() << "  argument copy elision failed due to bad fixed stack "
9212                   "object size\n");
9213     return;
9214   }
9215   unsigned RequiredAlignment = AI->getAlignment();
9216   if (!RequiredAlignment) {
9217     RequiredAlignment = FuncInfo->MF->getDataLayout().getABITypeAlignment(
9218         AI->getAllocatedType());
9219   }
9220   if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
9221     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
9222                          "greater than stack argument alignment ("
9223                       << RequiredAlignment << " vs "
9224                       << MFI.getObjectAlignment(FixedIndex) << ")\n");
9225     return;
9226   }
9227 
9228   // Perform the elision. Delete the old stack object and replace its only use
9229   // in the variable info map. Mark the stack object as mutable.
9230   LLVM_DEBUG({
9231     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
9232            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
9233            << '\n';
9234   });
9235   MFI.RemoveStackObject(OldIndex);
9236   MFI.setIsImmutableObjectIndex(FixedIndex, false);
9237   AllocaIndex = FixedIndex;
9238   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
9239   Chains.push_back(ArgVal.getValue(1));
9240 
9241   // Avoid emitting code for the store implementing the copy.
9242   const StoreInst *SI = ArgCopyIter->second.second;
9243   ElidedArgCopyInstrs.insert(SI);
9244 
9245   // Check for uses of the argument again so that we can avoid exporting ArgVal
9246   // if it isn't used by anything other than the store.
9247   for (const Value *U : Arg.users()) {
9248     if (U != SI) {
9249       ArgHasUses = true;
9250       break;
9251     }
9252   }
9253 }
9254 
9255 void SelectionDAGISel::LowerArguments(const Function &F) {
9256   SelectionDAG &DAG = SDB->DAG;
9257   SDLoc dl = SDB->getCurSDLoc();
9258   const DataLayout &DL = DAG.getDataLayout();
9259   SmallVector<ISD::InputArg, 16> Ins;
9260 
9261   if (!FuncInfo->CanLowerReturn) {
9262     // Put in an sret pointer parameter before all the other parameters.
9263     SmallVector<EVT, 1> ValueVTs;
9264     ComputeValueVTs(*TLI, DAG.getDataLayout(),
9265                     F.getReturnType()->getPointerTo(
9266                         DAG.getDataLayout().getAllocaAddrSpace()),
9267                     ValueVTs);
9268 
9269     // NOTE: Assuming that a pointer will never break down to more than one VT
9270     // or one register.
9271     ISD::ArgFlagsTy Flags;
9272     Flags.setSRet();
9273     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
9274     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
9275                          ISD::InputArg::NoArgIndex, 0);
9276     Ins.push_back(RetArg);
9277   }
9278 
9279   // Look for stores of arguments to static allocas. Mark such arguments with a
9280   // flag to ask the target to give us the memory location of that argument if
9281   // available.
9282   ArgCopyElisionMapTy ArgCopyElisionCandidates;
9283   findArgumentCopyElisionCandidates(DL, FuncInfo, ArgCopyElisionCandidates);
9284 
9285   // Set up the incoming argument description vector.
9286   for (const Argument &Arg : F.args()) {
9287     unsigned ArgNo = Arg.getArgNo();
9288     SmallVector<EVT, 4> ValueVTs;
9289     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9290     bool isArgValueUsed = !Arg.use_empty();
9291     unsigned PartBase = 0;
9292     Type *FinalType = Arg.getType();
9293     if (Arg.hasAttribute(Attribute::ByVal))
9294       FinalType = cast<PointerType>(FinalType)->getElementType();
9295     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
9296         FinalType, F.getCallingConv(), F.isVarArg());
9297     for (unsigned Value = 0, NumValues = ValueVTs.size();
9298          Value != NumValues; ++Value) {
9299       EVT VT = ValueVTs[Value];
9300       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
9301       ISD::ArgFlagsTy Flags;
9302 
9303       // Certain targets (such as MIPS), may have a different ABI alignment
9304       // for a type depending on the context. Give the target a chance to
9305       // specify the alignment it wants.
9306       unsigned OriginalAlignment =
9307           TLI->getABIAlignmentForCallingConv(ArgTy, DL);
9308 
9309       if (Arg.hasAttribute(Attribute::ZExt))
9310         Flags.setZExt();
9311       if (Arg.hasAttribute(Attribute::SExt))
9312         Flags.setSExt();
9313       if (Arg.hasAttribute(Attribute::InReg)) {
9314         // If we are using the vectorcall calling convention, a structure that
9315         // is passed InReg must be an HVA (Homogeneous Vector Aggregate).
9316         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
9317             isa<StructType>(Arg.getType())) {
9318           // The first value of a structure is marked as HvaStart.
9319           if (0 == Value)
9320             Flags.setHvaStart();
9321           Flags.setHva();
9322         }
9323         // Set the InReg flag.
9324         Flags.setInReg();
9325       }
9326       if (Arg.hasAttribute(Attribute::StructRet))
9327         Flags.setSRet();
9328       if (Arg.hasAttribute(Attribute::SwiftSelf))
9329         Flags.setSwiftSelf();
9330       if (Arg.hasAttribute(Attribute::SwiftError))
9331         Flags.setSwiftError();
9332       if (Arg.hasAttribute(Attribute::ByVal))
9333         Flags.setByVal();
9334       if (Arg.hasAttribute(Attribute::InAlloca)) {
9335         Flags.setInAlloca();
9336         // Set the byval flag for CCAssignFn callbacks that don't know about
9337         // inalloca.  This way we can know how many bytes we should've allocated
9338         // and how many bytes a callee cleanup function will pop.  If we port
9339         // inalloca to more targets, we'll have to add custom inalloca handling
9340         // in the various CC lowering callbacks.
9341         Flags.setByVal();
9342       }
9343       if (F.getCallingConv() == CallingConv::X86_INTR) {
9344         // x86 interrupts pass the frame (1st parameter) by value on the stack.
9345         if (ArgNo == 0)
9346           Flags.setByVal();
9347       }
9348       if (Flags.isByVal() || Flags.isInAlloca()) {
9349         PointerType *Ty = cast<PointerType>(Arg.getType());
9350         Type *ElementTy = Ty->getElementType();
9351         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
9352         // For ByVal, the alignment should be passed from the frontend; the
9353         // backend will guess if it is missing, but there are cases it cannot get right.
9354         unsigned FrameAlign;
9355         if (Arg.getParamAlignment())
9356           FrameAlign = Arg.getParamAlignment();
9357         else
9358           FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
9359         Flags.setByValAlign(FrameAlign);
9360       }
9361       if (Arg.hasAttribute(Attribute::Nest))
9362         Flags.setNest();
9363       if (NeedsRegBlock)
9364         Flags.setInConsecutiveRegs();
9365       Flags.setOrigAlign(OriginalAlignment);
9366       if (ArgCopyElisionCandidates.count(&Arg))
9367         Flags.setCopyElisionCandidate();
9368 
9369       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
9370           *CurDAG->getContext(), F.getCallingConv(), VT);
9371       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
9372           *CurDAG->getContext(), F.getCallingConv(), VT);
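           // A single IR value may need several registers; e.g. an i64 argument
           // on a 32-bit target typically becomes two i32 parts, with the first
           // marked Split and the last marked SplitEnd below.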
9373       for (unsigned i = 0; i != NumRegs; ++i) {
9374         ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
9375                               ArgNo, PartBase+i*RegisterVT.getStoreSize());
9376         if (NumRegs > 1 && i == 0)
9377           MyFlags.Flags.setSplit();
9378         // If it isn't the first piece, the alignment must be 1.
9379         else if (i > 0) {
9380           MyFlags.Flags.setOrigAlign(1);
9381           if (i == NumRegs - 1)
9382             MyFlags.Flags.setSplitEnd();
9383         }
9384         Ins.push_back(MyFlags);
9385       }
9386       if (NeedsRegBlock && Value == NumValues - 1)
9387         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
9388       PartBase += VT.getStoreSize();
9389     }
9390   }
9391 
9392   // Call the target to set up the argument values.
9393   SmallVector<SDValue, 8> InVals;
9394   SDValue NewRoot = TLI->LowerFormalArguments(
9395       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
9396 
9397   // Verify that the target's LowerFormalArguments behaved as expected.
9398   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
9399          "LowerFormalArguments didn't return a valid chain!");
9400   assert(InVals.size() == Ins.size() &&
9401          "LowerFormalArguments didn't emit the correct number of values!");
9402   LLVM_DEBUG({
9403     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
9404       assert(InVals[i].getNode() &&
9405              "LowerFormalArguments emitted a null value!");
9406       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
9407              "LowerFormalArguments emitted a value with the wrong type!");
9408     }
9409   });
9410 
9411   // Update the DAG with the new chain value resulting from argument lowering.
9412   DAG.setRoot(NewRoot);
9413 
9414   // Set up the argument values.
9415   unsigned i = 0;
9416   if (!FuncInfo->CanLowerReturn) {
9417     // Create a virtual register for the sret pointer, and put in a copy
9418     // from the sret argument into it.
9419     SmallVector<EVT, 1> ValueVTs;
9420     ComputeValueVTs(*TLI, DAG.getDataLayout(),
9421                     F.getReturnType()->getPointerTo(
9422                         DAG.getDataLayout().getAllocaAddrSpace()),
9423                     ValueVTs);
9424     MVT VT = ValueVTs[0].getSimpleVT();
9425     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
9426     Optional<ISD::NodeType> AssertOp = None;
9427     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
9428                                         nullptr, F.getCallingConv(), AssertOp);
9429 
9430     MachineFunction& MF = SDB->DAG.getMachineFunction();
9431     MachineRegisterInfo& RegInfo = MF.getRegInfo();
9432     unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
9433     FuncInfo->DemoteRegister = SRetReg;
9434     NewRoot =
9435         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
9436     DAG.setRoot(NewRoot);
9437 
9438     // i indexes lowered arguments.  Bump it past the hidden sret argument.
9439     ++i;
9440   }
9441 
9442   SmallVector<SDValue, 4> Chains;
9443   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
9444   for (const Argument &Arg : F.args()) {
9445     SmallVector<SDValue, 4> ArgValues;
9446     SmallVector<EVT, 4> ValueVTs;
9447     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9448     unsigned NumValues = ValueVTs.size();
9449     if (NumValues == 0)
9450       continue;
9451 
9452     bool ArgHasUses = !Arg.use_empty();
9453 
9454     // Elide the copying store if the target loaded this argument from a
9455     // suitable fixed stack object.
9456     if (Ins[i].Flags.isCopyElisionCandidate()) {
9457       tryToElideArgumentCopy(FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
9458                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
9459                              InVals[i], ArgHasUses);
9460     }
9461 
9462     // If this argument is unused, remember its value; it is used to generate
9463     // debugging information.
9464     bool isSwiftErrorArg =
9465         TLI->supportSwiftError() &&
9466         Arg.hasAttribute(Attribute::SwiftError);
9467     if (!ArgHasUses && !isSwiftErrorArg) {
9468       SDB->setUnusedArgValue(&Arg, InVals[i]);
9469 
9470       // Also remember any frame index for use in FastISel.
9471       if (FrameIndexSDNode *FI =
9472           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
9473         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9474     }
9475 
9476     for (unsigned Val = 0; Val != NumValues; ++Val) {
9477       EVT VT = ValueVTs[Val];
9478       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
9479                                                       F.getCallingConv(), VT);
9480       unsigned NumParts = TLI->getNumRegistersForCallingConv(
9481           *CurDAG->getContext(), F.getCallingConv(), VT);
9482 
9483       // Even an apparently unused swifterror argument needs to be returned, so
9484       // we generate a copy for it that can be used on return from the
9485       // function.
9486       if (ArgHasUses || isSwiftErrorArg) {
9487         Optional<ISD::NodeType> AssertOp;
9488         if (Arg.hasAttribute(Attribute::SExt))
9489           AssertOp = ISD::AssertSext;
9490         else if (Arg.hasAttribute(Attribute::ZExt))
9491           AssertOp = ISD::AssertZext;
9492 
9493         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
9494                                              PartVT, VT, nullptr,
9495                                              F.getCallingConv(), AssertOp));
9496       }
9497 
9498       i += NumParts;
9499     }
9500 
9501     // We don't need to do anything else for unused arguments.
9502     if (ArgValues.empty())
9503       continue;
9504 
9505     // Note down frame index.
9506     if (FrameIndexSDNode *FI =
9507         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
9508       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9509 
9510     SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
9511                                      SDB->getCurSDLoc());
9512 
9513     SDB->setValue(&Arg, Res);
9514     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
9515       // We want to associate the argument with the frame index, among the
9516       // involved operands, that corresponds to the lowest address. The
9517       // getCopyFromParts function, called earlier, swaps the order of the
9518       // operands to BUILD_PAIR depending on endianness. The result of that
9519       // swapping is that the least significant bits of the argument will be
9520       // in the first operand of the BUILD_PAIR node, and the most
9521       // significant bits will be in the second operand.
9522       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
9523       if (LoadSDNode *LNode =
9524           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
9525         if (FrameIndexSDNode *FI =
9526             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
9527           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9528     }
9529 
9530     // Update the SwiftErrorVRegDefMap.
9531     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
9532       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9533       if (TargetRegisterInfo::isVirtualRegister(Reg))
9534         FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB,
9535                                            FuncInfo->SwiftErrorArg, Reg);
9536     }
9537 
9538     // If this argument is live outside of the entry block, insert a copy from
9539     // wherever we got it to the vreg that other BB's will reference it as.
9540     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
9541       // If we can, though, try to skip creating an unnecessary vreg.
9542       // FIXME: This isn't very clean... it would be nice to make this more
9543       // general.  It's also subtly incompatible with the hacks FastISel
9544       // uses with vregs.
9545       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9546       if (TargetRegisterInfo::isVirtualRegister(Reg)) {
9547         FuncInfo->ValueMap[&Arg] = Reg;
9548         continue;
9549       }
9550     }
9551     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
9552       FuncInfo->InitializeRegForValue(&Arg);
9553       SDB->CopyToExportRegsIfNeeded(&Arg);
9554     }
9555   }
9556 
9557   if (!Chains.empty()) {
9558     Chains.push_back(NewRoot);
9559     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
9560   }
9561 
9562   DAG.setRoot(NewRoot);
9563 
9564   assert(i == InVals.size() && "Argument register count mismatch!");
9565 
9566   // If any argument copy elisions occurred and we have debug info, update the
9567   // stale frame indices used in the dbg.declare variable info table.
9568   MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
9569   if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
9570     for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
9571       auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
9572       if (I != ArgCopyElisionFrameIndexMap.end())
9573         VI.Slot = I->second;
9574     }
9575   }
9576 
9577   // Finally, if the target has anything special to do, allow it to do so.
9578   EmitFunctionEntryCode();
9579 }
9580 
9581 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
9582 /// ensure constants are generated when needed.  Remember the virtual registers
9583 /// that need to be added to the Machine PHI nodes as input.  We cannot just
9584 /// directly add them, because expansion might result in multiple MBB's for one
9585 /// BB.  As such, the start of the BB might correspond to a different MBB than
9586 /// the end.
9587 void
9588 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
9589   const Instruction *TI = LLVMBB->getTerminator();
9590 
9591   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
9592 
9593   // Check PHI nodes in successors that expect a value to be available from this
9594   // block.
9595   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
9596     const BasicBlock *SuccBB = TI->getSuccessor(succ);
9597     if (!isa<PHINode>(SuccBB->begin())) continue;
9598     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
9599 
9600     // If this terminator has multiple identical successors (common for
9601     // switches), only handle each succ once.
9602     if (!SuccsHandled.insert(SuccMBB).second)
9603       continue;
9604 
9605     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
9606 
9607     // At this point we know that there is a 1-1 correspondence between LLVM PHI
9608     // nodes and Machine PHI nodes, but the incoming operands have not been
9609     // emitted yet.
9610     for (const PHINode &PN : SuccBB->phis()) {
9611       // Ignore dead PHIs.
9612       if (PN.use_empty())
9613         continue;
9614 
9615       // Skip empty types
9616       if (PN.getType()->isEmptyTy())
9617         continue;
9618 
9619       unsigned Reg;
9620       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
9621 
9622       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
9623         unsigned &RegOut = ConstantsOut[C];
9624         if (RegOut == 0) {
9625           RegOut = FuncInfo.CreateRegs(C->getType());
9626           CopyValueToVirtualRegister(C, RegOut);
9627         }
9628         Reg = RegOut;
9629       } else {
9630         DenseMap<const Value *, unsigned>::iterator I =
9631           FuncInfo.ValueMap.find(PHIOp);
9632         if (I != FuncInfo.ValueMap.end())
9633           Reg = I->second;
9634         else {
9635           assert(isa<AllocaInst>(PHIOp) &&
9636                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
9637                  "Didn't codegen value into a register!??");
9638           Reg = FuncInfo.CreateRegs(PHIOp->getType());
9639           CopyValueToVirtualRegister(PHIOp, Reg);
9640         }
9641       }
9642 
9643       // Remember that this register needs to be added to the machine PHI
9644       // node as the input for this MBB.
9645       SmallVector<EVT, 4> ValueVTs;
9646       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9647       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
9648       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
9649         EVT VT = ValueVTs[vti];
9650         unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
9651         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
9652           FuncInfo.PHINodesToUpdate.push_back(
9653               std::make_pair(&*MBBI++, Reg + i));
9654         Reg += NumRegisters;
9655       }
9656     }
9657   }
9658 
9659   ConstantsOut.clear();
9660 }
9661 
9662 /// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
9663 /// if SuccMBB is null.
9664 MachineBasicBlock *
9665 SelectionDAGBuilder::StackProtectorDescriptor::
9666 AddSuccessorMBB(const BasicBlock *BB,
9667                 MachineBasicBlock *ParentMBB,
9668                 bool IsLikely,
9669                 MachineBasicBlock *SuccMBB) {
9670   // If SuccMBB has not been created yet, create it.
9671   if (!SuccMBB) {
9672     MachineFunction *MF = ParentMBB->getParent();
9673     MachineFunction::iterator BBI(ParentMBB);
9674     SuccMBB = MF->CreateMachineBasicBlock(BB);
9675     MF->insert(++BBI, SuccMBB);
9676   }
9677   // Add it as a successor of ParentMBB.
9678   ParentMBB->addSuccessor(
9679       SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
9680   return SuccMBB;
9681 }
9682 
9683 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
9684   MachineFunction::iterator I(MBB);
9685   if (++I == FuncInfo.MF->end())
9686     return nullptr;
9687   return &*I;
9688 }
9689 
9690 /// During lowering new call nodes can be created (such as memset, etc.).
9691 /// Those will become new roots of the current DAG, but complications arise
9692 /// when they are tail calls. In such cases, the call lowering will update
9693 /// the root, but the builder still needs to know that a tail call has been
9694 /// lowered in order to avoid generating an additional return.
9695 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
9696   // A null node means a tail call was lowered; otherwise update the root.
9697   if (MaybeTC.getNode() != nullptr)
9698     DAG.setRoot(MaybeTC);
9699   else
9700     HasTailCall = true;
9701 }
9702 
9703 uint64_t
9704 SelectionDAGBuilder::getJumpTableRange(const CaseClusterVector &Clusters,
9705                                        unsigned First, unsigned Last) const {
9706   assert(Last >= First);
9707   const APInt &LowCase = Clusters[First].Low->getValue();
9708   const APInt &HighCase = Clusters[Last].High->getValue();
9709   assert(LowCase.getBitWidth() == HighCase.getBitWidth());
9710 
9711   // FIXME: A range of consecutive cases has 100% density, but only requires one
9712   // comparison to lower. We should discriminate against such consecutive ranges
9713   // in jump tables.
9714 
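       // The clamp below keeps Range small enough that the caller's density
       // check (roughly NumCases * 100 >= Range * MinDensity) cannot overflow
       // uint64_t arithmetic.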
9715   return (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100) + 1;
9716 }
9717 
9718 uint64_t SelectionDAGBuilder::getJumpTableNumCases(
9719     const SmallVectorImpl<unsigned> &TotalCases, unsigned First,
9720     unsigned Last) const {
9721   assert(Last >= First);
9722   assert(TotalCases[Last] >= TotalCases[First]);
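       // TotalCases is a prefix-sum array, so the count for Clusters[First..Last]
       // is a difference of two prefix sums.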
9723   uint64_t NumCases =
9724       TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
9725   return NumCases;
9726 }
9727 
9728 bool SelectionDAGBuilder::buildJumpTable(const CaseClusterVector &Clusters,
9729                                          unsigned First, unsigned Last,
9730                                          const SwitchInst *SI,
9731                                          MachineBasicBlock *DefaultMBB,
9732                                          CaseCluster &JTCluster) {
9733   assert(First <= Last);
9734 
9735   auto Prob = BranchProbability::getZero();
9736   unsigned NumCmps = 0;
9737   std::vector<MachineBasicBlock*> Table;
9738   DenseMap<MachineBasicBlock*, BranchProbability> JTProbs;
9739 
9740   // Initialize probabilities in JTProbs.
9741   for (unsigned I = First; I <= Last; ++I)
9742     JTProbs[Clusters[I].MBB] = BranchProbability::getZero();
9743 
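       // Build one table entry per case value, filling gaps between clusters
       // with the default destination; e.g. clusters [0,1]->A and [4,4]->B with
       // default D produce the table {A, A, D, D, B}.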
9744   for (unsigned I = First; I <= Last; ++I) {
9745     assert(Clusters[I].Kind == CC_Range);
9746     Prob += Clusters[I].Prob;
9747     const APInt &Low = Clusters[I].Low->getValue();
9748     const APInt &High = Clusters[I].High->getValue();
9749     NumCmps += (Low == High) ? 1 : 2;
9750     if (I != First) {
9751       // Fill the gap between this and the previous cluster.
9752       const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
9753       assert(PreviousHigh.slt(Low));
9754       uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
9755       for (uint64_t J = 0; J < Gap; J++)
9756         Table.push_back(DefaultMBB);
9757     }
9758     uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
9759     for (uint64_t J = 0; J < ClusterSize; ++J)
9760       Table.push_back(Clusters[I].MBB);
9761     JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
9762   }
9763 
9764   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9765   unsigned NumDests = JTProbs.size();
9766   if (TLI.isSuitableForBitTests(
9767           NumDests, NumCmps, Clusters[First].Low->getValue(),
9768           Clusters[Last].High->getValue(), DAG.getDataLayout())) {
9769     // Clusters[First..Last] should be lowered as bit tests instead.
9770     return false;
9771   }
9772 
9773   // Create the MBB that will load from and jump through the table.
9774   // Note: We create it here, but it's not inserted into the function yet.
9775   MachineFunction *CurMF = FuncInfo.MF;
9776   MachineBasicBlock *JumpTableMBB =
9777       CurMF->CreateMachineBasicBlock(SI->getParent());
9778 
9779   // Add successors. Note: use table order for determinism.
9780   SmallPtrSet<MachineBasicBlock *, 8> Done;
9781   for (MachineBasicBlock *Succ : Table) {
9782     if (Done.count(Succ))
9783       continue;
9784     addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
9785     Done.insert(Succ);
9786   }
9787   JumpTableMBB->normalizeSuccProbs();
9788 
9789   unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI.getJumpTableEncoding())
9790                      ->createJumpTableIndex(Table);
9791 
9792   // Set up the jump table info.
9793   JumpTable JT(-1U, JTI, JumpTableMBB, nullptr);
9794   JumpTableHeader JTH(Clusters[First].Low->getValue(),
9795                       Clusters[Last].High->getValue(), SI->getCondition(),
9796                       nullptr, false);
9797   JTCases.emplace_back(std::move(JTH), std::move(JT));
9798 
9799   JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
9800                                      JTCases.size() - 1, Prob);
9801   return true;
9802 }
9803 
9804 void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
9805                                          const SwitchInst *SI,
9806                                          MachineBasicBlock *DefaultMBB) {
9807 #ifndef NDEBUG
9808   // Clusters must be non-empty, sorted, and only contain Range clusters.
9809   assert(!Clusters.empty());
9810   for (CaseCluster &C : Clusters)
9811     assert(C.Kind == CC_Range);
9812   for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
9813     assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
9814 #endif
9815 
9816   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9817   if (!TLI.areJTsAllowed(SI->getParent()->getParent()))
9818     return;
9819 
9820   const int64_t N = Clusters.size();
9821   const unsigned MinJumpTableEntries = TLI.getMinimumJumpTableEntries();
9822   const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;
9823 
9824   if (N < 2 || N < MinJumpTableEntries)
9825     return;
9826 
9827   // TotalCases[i]: Total number of cases in Clusters[0..i].
9828   SmallVector<unsigned, 8> TotalCases(N);
9829   for (unsigned i = 0; i < N; ++i) {
9830     const APInt &Hi = Clusters[i].High->getValue();
9831     const APInt &Lo = Clusters[i].Low->getValue();
9832     TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
9833     if (i != 0)
9834       TotalCases[i] += TotalCases[i - 1];
9835   }
9836 
9837   // Cheap case: the whole range may be suitable for a jump table.
9838   uint64_t Range = getJumpTableRange(Clusters, 0, N - 1);
9839   uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
9840   assert(NumCases < UINT64_MAX / 100);
9841   assert(Range >= NumCases);
9842   if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) {
9843     CaseCluster JTCluster;
9844     if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
9845       Clusters[0] = JTCluster;
9846       Clusters.resize(1);
9847       return;
9848     }
9849   }
9850 
9851   // The algorithm below is not suitable for -O0.
9852   if (TM.getOptLevel() == CodeGenOpt::None)
9853     return;
9854 
9855   // Split Clusters into minimum number of dense partitions. The algorithm uses
9856   // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
9857   // for the Case Statement'" (1994), but builds the MinPartitions array in
9858   // reverse order to make it easier to reconstruct the partitions in ascending
9859   // order. In the choice between two optimal partitionings, it picks the one
9860   // which yields more jump tables.
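       //
       // For example, clusters {0,1,2,3,100} are usually better served by a jump
       // table for [0,3] plus a single comparison for 100 than by one sparse
       // table covering [0,100].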
9861 
9862   // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
9863   SmallVector<unsigned, 8> MinPartitions(N);
9864   // LastElement[i] is the last element of the partition starting at i.
9865   SmallVector<unsigned, 8> LastElement(N);
9866   // PartitionsScore[i] is used to break ties when choosing between two
9867   // partitionings resulting in the same number of partitions.
9868   SmallVector<unsigned, 8> PartitionsScore(N);
9869   // For PartitionsScore, a small number of comparisons is considered as good as
9870   // a jump table and a single comparison is considered better than a jump
9871   // table.
9872   enum PartitionScores : unsigned {
9873     NoTable = 0,
9874     Table = 1,
9875     FewCases = 1,
9876     SingleCase = 2
9877   };
9878 
9879   // Base case: There is only one way to partition Clusters[N-1].
9880   MinPartitions[N - 1] = 1;
9881   LastElement[N - 1] = N - 1;
9882   PartitionsScore[N - 1] = PartitionScores::SingleCase;
9883 
9884   // Note: loop indexes are signed to avoid underflow.
9885   for (int64_t i = N - 2; i >= 0; i--) {
9886     // Find optimal partitioning of Clusters[i..N-1].
9887     // Baseline: Put Clusters[i] into a partition on its own.
9888     MinPartitions[i] = MinPartitions[i + 1] + 1;
9889     LastElement[i] = i;
9890     PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;
9891 
9892     // Search for a solution that results in fewer partitions.
9893     for (int64_t j = N - 1; j > i; j--) {
9894       // Try building a partition from Clusters[i..j].
9895       uint64_t Range = getJumpTableRange(Clusters, i, j);
9896       uint64_t NumCases = getJumpTableNumCases(TotalCases, i, j);
9897       assert(NumCases < UINT64_MAX / 100);
9898       assert(Range >= NumCases);
9899       if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) {
9900         unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9901         unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
9902         int64_t NumEntries = j - i + 1;
9903 
9904         if (NumEntries == 1)
9905           Score += PartitionScores::SingleCase;
9906         else if (NumEntries <= SmallNumberOfEntries)
9907           Score += PartitionScores::FewCases;
9908         else if (NumEntries >= MinJumpTableEntries)
9909           Score += PartitionScores::Table;
9910 
9911         // If this leads to fewer partitions, or to the same number of
9912         // partitions with better score, it is a better partitioning.
9913         if (NumPartitions < MinPartitions[i] ||
9914             (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
9915           MinPartitions[i] = NumPartitions;
9916           LastElement[i] = j;
9917           PartitionsScore[i] = Score;
9918         }
9919       }
9920     }
9921   }
9922 
9923   // Iterate over the partitions, replacing some with jump tables in-place.
9924   unsigned DstIndex = 0;
9925   for (unsigned First = 0, Last; First < N; First = Last + 1) {
9926     Last = LastElement[First];
9927     assert(Last >= First);
9928     assert(DstIndex <= First);
9929     unsigned NumClusters = Last - First + 1;
9930 
9931     CaseCluster JTCluster;
9932     if (NumClusters >= MinJumpTableEntries &&
9933         buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
9934       Clusters[DstIndex++] = JTCluster;
9935     } else {
9936       for (unsigned I = First; I <= Last; ++I)
9937         std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
9938     }
9939   }
9940   Clusters.resize(DstIndex);
9941 }
9942 
9943 bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
9944                                         unsigned First, unsigned Last,
9945                                         const SwitchInst *SI,
9946                                         CaseCluster &BTCluster) {
9947   assert(First <= Last);
9948   if (First == Last)
9949     return false;
9950 
9951   BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9952   unsigned NumCmps = 0;
9953   for (int64_t I = First; I <= Last; ++I) {
9954     assert(Clusters[I].Kind == CC_Range);
9955     Dests.set(Clusters[I].MBB->getNumber());
9956     NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
9957   }
9958   unsigned NumDests = Dests.count();
9959 
9960   APInt Low = Clusters[First].Low->getValue();
9961   APInt High = Clusters[Last].High->getValue();
9962   assert(Low.slt(High));
9963 
9964   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9965   const DataLayout &DL = DAG.getDataLayout();
9966   if (!TLI.isSuitableForBitTests(NumDests, NumCmps, Low, High, DL))
9967     return false;
9968 
9969   APInt LowBound;
9970   APInt CmpRange;
9971 
9972   const int BitWidth = TLI.getPointerTy(DL).getSizeInBits();
9973   assert(TLI.rangeFitsInWord(Low, High, DL) &&
9974          "Case range must fit in bit mask!");
9975 
9976   // Check if the clusters cover a contiguous range such that no value in the
9977   // range will jump to the default statement.
9978   bool ContiguousRange = true;
9979   for (int64_t I = First + 1; I <= Last; ++I) {
9980     if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
9981       ContiguousRange = false;
9982       break;
9983     }
9984   }
9985 
9986   if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
9987     // Optimize the case where all the case values fit in a word without
9988     // having to subtract minValue; the subtraction can be optimized away.
9989     LowBound = APInt::getNullValue(Low.getBitWidth());
9990     CmpRange = High;
9991     ContiguousRange = false;
9992   } else {
9993     LowBound = Low;
9994     CmpRange = High - Low;
9995   }
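       // The lowered code will roughly test ((1 << (X - LowBound)) & Mask) != 0
       // per destination; e.g. cases 1, 3 and 5 sharing one destination give
       // Mask 0b101010.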
9996 
9997   CaseBitsVector CBV;
9998   auto TotalProb = BranchProbability::getZero();
9999   for (unsigned i = First; i <= Last; ++i) {
10000     // Find the CaseBits for this destination.
10001     unsigned j;
10002     for (j = 0; j < CBV.size(); ++j)
10003       if (CBV[j].BB == Clusters[i].MBB)
10004         break;
10005     if (j == CBV.size())
10006       CBV.push_back(
10007           CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
10008     CaseBits *CB = &CBV[j];
10009 
10010     // Update Mask, Bits and ExtraProb.
10011     uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
10012     uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
10013     assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
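          // Set bits Lo..Hi of the mask; e.g. Lo=2, Hi=4 yields 0b11100.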
10014     CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
10015     CB->Bits += Hi - Lo + 1;
10016     CB->ExtraProb += Clusters[i].Prob;
10017     TotalProb += Clusters[i].Prob;
10018   }
10019 
10020   BitTestInfo BTI;
10021   llvm::sort(CBV, [](const CaseBits &a, const CaseBits &b) {
10022     // Sort by probability first, number of bits second, bit mask third.
10023     if (a.ExtraProb != b.ExtraProb)
10024       return a.ExtraProb > b.ExtraProb;
10025     if (a.Bits != b.Bits)
10026       return a.Bits > b.Bits;
10027     return a.Mask < b.Mask;
10028   });
10029 
10030   for (auto &CB : CBV) {
10031     MachineBasicBlock *BitTestBB =
10032         FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
10033     BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
10034   }
10035   BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
10036                             SI->getCondition(), -1U, MVT::Other, false,
10037                             ContiguousRange, nullptr, nullptr, std::move(BTI),
10038                             TotalProb);
10039 
10040   BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
10041                                     BitTestCases.size() - 1, TotalProb);
10042   return true;
10043 }
10044 
10045 void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
10046                                               const SwitchInst *SI) {
10047 // Partition Clusters into as few subsets as possible, where each subset has a
10048 // range that fits in a machine word and has <= 3 unique destinations.
10049 
10050 #ifndef NDEBUG
10051   // Clusters must be sorted and contain Range or JumpTable clusters.
10052   assert(!Clusters.empty());
10053   assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
10054   for (const CaseCluster &C : Clusters)
10055     assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
10056   for (unsigned i = 1; i < Clusters.size(); ++i)
10057     assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
10058 #endif
10059 
10060   // The algorithm below is not suitable for -O0.
10061   if (TM.getOptLevel() == CodeGenOpt::None)
10062     return;
10063 
10064   // If target does not have legal shift left, do not emit bit tests at all.
10065   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10066   const DataLayout &DL = DAG.getDataLayout();
10067 
10068   EVT PTy = TLI.getPointerTy(DL);
10069   if (!TLI.isOperationLegal(ISD::SHL, PTy))
10070     return;
10071 
10072   int BitWidth = PTy.getSizeInBits();
10073   const int64_t N = Clusters.size();
10074 
10075   // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
10076   SmallVector<unsigned, 8> MinPartitions(N);
10077   // LastElement[i] is the last element of the partition starting at i.
10078   SmallVector<unsigned, 8> LastElement(N);
10079 
10080   // FIXME: This might not be the best algorithm for finding bit test clusters.
10081 
10082   // Base case: There is only one way to partition Clusters[N-1].
10083   MinPartitions[N - 1] = 1;
10084   LastElement[N - 1] = N - 1;
10085 
10086   // Note: loop indexes are signed to avoid underflow.
10087   for (int64_t i = N - 2; i >= 0; --i) {
10088     // Find optimal partitioning of Clusters[i..N-1].
10089     // Baseline: Put Clusters[i] into a partition on its own.
10090     MinPartitions[i] = MinPartitions[i + 1] + 1;
10091     LastElement[i] = i;
10092 
10093     // Search for a solution that results in fewer partitions.
10094     // Note: the search is limited by BitWidth, reducing time complexity.
10095     for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
10096       // Try building a partition from Clusters[i..j].
10097 
10098       // Check the range.
10099       if (!TLI.rangeFitsInWord(Clusters[i].Low->getValue(),
10100                                Clusters[j].High->getValue(), DL))
10101         continue;
10102 
10103       // Check the number of destinations and cluster types.
10104       // FIXME: This works, but doesn't seem very efficient.
10105       bool RangesOnly = true;
10106       BitVector Dests(FuncInfo.MF->getNumBlockIDs());
10107       for (int64_t k = i; k <= j; k++) {
10108         if (Clusters[k].Kind != CC_Range) {
10109           RangesOnly = false;
10110           break;
10111         }
10112         Dests.set(Clusters[k].MBB->getNumber());
10113       }
10114       if (!RangesOnly || Dests.count() > 3)
10115         break;
10116 
10117       // Check if it's a better partition.
10118       unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
10119       if (NumPartitions < MinPartitions[i]) {
10120         // Found a better partition.
10121         MinPartitions[i] = NumPartitions;
10122         LastElement[i] = j;
10123       }
10124     }
10125   }
10126 
10127   // Iterate over the partitions, replacing some with bit-test clusters in-place.
10128   unsigned DstIndex = 0;
10129   for (unsigned First = 0, Last; First < N; First = Last + 1) {
10130     Last = LastElement[First];
10131     assert(First <= Last);
10132     assert(DstIndex <= First);
10133 
10134     CaseCluster BitTestCluster;
10135     if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
10136       Clusters[DstIndex++] = BitTestCluster;
10137     } else {
10138       size_t NumClusters = Last - First + 1;
10139       std::memmove(&Clusters[DstIndex], &Clusters[First],
10140                    sizeof(Clusters[0]) * NumClusters);
10141       DstIndex += NumClusters;
10142     }
10143   }
  Clusters.resize(DstIndex);
}

void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
                                        MachineBasicBlock *SwitchMBB,
                                        MachineBasicBlock *DefaultMBB) {
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  unsigned Size = W.LastCluster - W.FirstCluster + 1;

  BranchProbabilityInfo *BPI = FuncInfo.BPI;

  if (Size == 2 && W.MBB == SwitchMBB) {
    // If the two cases have the same destination, and one value is the same
    // as the other except for a single bit that is unset in one and set in
    // the other, use bit manipulation to do both compares at once. For
    // example:
    // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
    // TODO: This could be extended to merge any 2 cases in switches with 3
    // cases.
    // TODO: Handle cases where W.CaseBB != SwitchBB.
    CaseCluster &Small = *W.FirstCluster;
    CaseCluster &Big = *W.LastCluster;

    if (Small.Low == Small.High && Big.Low == Big.High &&
        Small.MBB == Big.MBB) {
      const APInt &SmallValue = Small.Low->getValue();
      const APInt &BigValue = Big.Low->getValue();

      // Check that exactly one bit differs between the two values.
      APInt CommonBit = BigValue ^ SmallValue;
      if (CommonBit.isPowerOf2()) {
        SDValue CondLHS = getValue(Cond);
        EVT VT = CondLHS.getValueType();
        SDLoc DL = getCurSDLoc();

        SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
                                 DAG.getConstant(CommonBit, DL, VT));
        SDValue Cond = DAG.getSetCC(
            DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
            ISD::SETEQ);
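        // Sanity check of the example in the comment above (values 4 and 6,
        // hypothetical): CommonBit = 4 ^ 6 = 2, and (X | 2) == 6 holds
        // exactly when X is 4 or 6, so the two equality tests collapse into
        // this single setcc.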

        // Update successor info.
        // Both Small and Big will jump to Small.MBB, so we sum up the
        // probabilities.
        addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
        if (BPI)
          addSuccessorWithProb(
              SwitchMBB, DefaultMBB,
              // The default destination is the first successor in IR.
              BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
        else
          addSuccessorWithProb(SwitchMBB, DefaultMBB);

        // Insert the true branch.
        SDValue BrCond =
            DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
                        DAG.getBasicBlock(Small.MBB));
        // Insert the false branch.
        BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
                             DAG.getBasicBlock(DefaultMBB));

        DAG.setRoot(BrCond);
        return;
      }
    }
  }

  if (TM.getOptLevel() != CodeGenOpt::None) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability, in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker, as clusters are guaranteed never to overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
      return a.Prob != b.Prob ?
             a.Prob > b.Prob :
             a.Low->getValue().slt(b.Low->getValue());
    });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
      // Put Cond in a virtual register to make it available from the new
      // blocks.
      ExportFromCurrentBlock(Cond);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
      case CC_JumpTable: {
        // FIXME: Optimize away range check based on pivot comparisons.
        JumpTableHeader *JTH = &JTCases[I->JTCasesIndex].first;
        JumpTable *JT = &JTCases[I->JTCasesIndex].second;

        // The jump block hasn't been inserted yet; insert it here.
        MachineBasicBlock *JumpMBB = JT->MBB;
        CurMF->insert(BBI, JumpMBB);

        auto JumpProb = I->Prob;
        auto FallthroughProb = UnhandledProbs;

        // If the default statement is a target of the jump table, we evenly
        // distribute the default probability to the successors of CurMBB. Also
        // update the probability on the edge from JumpMBB to Fallthrough.
        for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                              SE = JumpMBB->succ_end();
             SI != SE; ++SI) {
          if (*SI == DefaultMBB) {
            JumpProb += DefaultProb / 2;
            FallthroughProb -= DefaultProb / 2;
            JumpMBB->setSuccProbability(SI, DefaultProb / 2);
            JumpMBB->normalizeSuccProbs();
            break;
          }
        }
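        // E.g. with hypothetical probabilities: if DefaultProb is 1/4 and
        // the jump table can also branch to the default block, half of it
        // (1/8) is added to the jump-table edge and subtracted from the
        // fallthrough edge, so no probability is counted twice.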

        addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
        addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
        CurMBB->normalizeSuccProbs();

        // The jump table header will be inserted into our current block; it
        // will do the range check and fall through to our fallthrough block.
        JTH->HeaderBB = CurMBB;
        JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

        // If we're in the right place, emit the jump table header right now.
        if (CurMBB == SwitchMBB) {
          visitJumpTableHeader(*JT, *JTH, SwitchMBB);
          JTH->Emitted = true;
        }
        break;
      }
      case CC_BitTests: {
        // FIXME: Optimize away range check based on pivot comparisons.
        BitTestBlock *BTB = &BitTestCases[I->BTCasesIndex];

        // The bit test blocks haven't been inserted yet; insert them here.
        for (BitTestCase &BTC : BTB->Cases)
          CurMF->insert(BBI, BTC.ThisBB);

        // Fill in fields of the BitTestBlock.
        BTB->Parent = CurMBB;
        BTB->Default = Fallthrough;

        BTB->DefaultProb = UnhandledProbs;
        // If the cases in the bit test don't form a contiguous range, we
        // evenly distribute the probability on the edge to Fallthrough between
        // the two successors of CurMBB.
        if (!BTB->ContiguousRange) {
          BTB->Prob += DefaultProb / 2;
          BTB->DefaultProb -= DefaultProb / 2;
        }

        // If we're in the right place, emit the bit test header right now.
        if (CurMBB == SwitchMBB) {
          visitBitTestHeader(*BTB, SwitchMBB);
          BTB->Emitted = true;
        }
        break;
      }
      case CC_Range: {
        const Value *RHS, *LHS, *MHS;
        ISD::CondCode CC;
        if (I->Low == I->High) {
          // Check Cond == I->Low.
          CC = ISD::SETEQ;
          LHS = Cond;
          RHS = I->Low;
          MHS = nullptr;
        } else {
          // Check I->Low <= Cond <= I->High.
          CC = ISD::SETLE;
          LHS = I->Low;
          MHS = Cond;
          RHS = I->High;
        }

        // The false probability is the sum of all unhandled cases.
        CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
                     getCurSDLoc(), I->Prob, UnhandledProbs);

        if (CurMBB == SwitchMBB)
          visitSwitchCase(CB, SwitchMBB);
        else
          SwitchCases.push_back(CB);

        break;
      }
    }
    CurMBB = Fallthrough;
  }
}

unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
                                              CaseClusterIt First,
                                              CaseClusterIt Last) {
  return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
    if (X.Prob != CC.Prob)
      return X.Prob > CC.Prob;

    // Ties are broken by comparing the case value.
    return X.Low->getValue().slt(CC.Low->getValue());
  });
}
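// For example (hypothetical probabilities): among clusters weighted
// {0.4, 0.3, 0.3}, the first 0.3 cluster has rank 1, since only the 0.4
// cluster sorts ahead of it; its 0.3 sibling loses the tie-break on the
// case value and does not count.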

void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
                                        const SwitchWorkListItem &W,
                                        Value *Cond,
                                        MachineBasicBlock *SwitchMBB) {
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");

  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  // Balance the tree based on branch probabilities to create a near-optimal
  // (in terms of search time given key frequency) binary search tree. See
  // e.g. Kurt Mehlhorn, "Nearly Optimal Binary Search Trees" (1975).
  CaseClusterIt LastLeft = W.FirstCluster;
  CaseClusterIt FirstRight = W.LastCluster;
  auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
  auto RightProb = FirstRight->Prob + W.DefaultProb / 2;

  // Move LastLeft and FirstRight towards each other from opposite directions
  // to find a partitioning of the clusters which balances the probability on
  // both sides. If LeftProb and RightProb are equal, alternate which side is
  // taken to ensure 0-probability nodes are distributed evenly.
  unsigned I = 0;
  while (LastLeft + 1 < FirstRight) {
    if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
      LeftProb += (++LastLeft)->Prob;
    else
      RightProb += (--FirstRight)->Prob;
    I++;
  }
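  // E.g. with hypothetical weights {1/8, 1/8, 1/4, 1/2} and a negligible
  // default probability: the walk stops with the three light clusters on the
  // left (total 1/2) and the heavy cluster alone on the right, so both
  // subtrees carry equal probability.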

  while (true) {
    // Our binary search tree differs from a typical BST in that ours can have
    // up to three values in each leaf. The pivot selection above doesn't take
    // that into account, which means the tree might require more nodes and be
    // less efficient. We compensate for this here.

    unsigned NumLeft = LastLeft - W.FirstCluster + 1;
    unsigned NumRight = W.LastCluster - FirstRight + 1;

    if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
      // If one side has fewer than 3 clusters, and the other has more than 3,
      // consider taking a cluster from the other side.

      if (NumLeft < NumRight) {
        // Consider moving the first cluster on the right to the left side.
        CaseCluster &CC = *FirstRight;
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        if (LeftSideRank <= RightSideRank) {
          // Moving the cluster to the left does not demote it.
          ++LastLeft;
          ++FirstRight;
          continue;
        }
      } else {
        assert(NumRight < NumLeft);
        // Consider moving the last element on the left to the right side.
        CaseCluster &CC = *LastLeft;
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        if (RightSideRank <= LeftSideRank) {
          // Moving the cluster to the right does not demote it.
          --LastLeft;
          --FirstRight;
          continue;
        }
      }
    }
    break;
  }
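  // E.g. (hypothetical): if the probability walk above left two clusters on
  // the left and five on the right, pulling the first right-hand cluster
  // into the left leaf is free whenever it would sort no later on the left
  // than it does on the right, turning a 2/5 split into a 3/4 one and saving
  // a node in the right subtree.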

  assert(LastLeft + 1 == FirstRight);
  assert(LastLeft >= W.FirstCluster);
  assert(FirstRight <= W.LastCluster);

  // Use the first element on the right as pivot since we will make less-than
  // comparisons against it.
  CaseClusterIt PivotCluster = FirstRight;
  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  CaseClusterIt FirstLeft = W.FirstCluster;
  CaseClusterIt LastRight = W.LastCluster;

  const ConstantInt *Pivot = PivotCluster->Low;

  // New blocks will be inserted immediately after the current one.
  MachineFunction::iterator BBI(W.MBB);
  ++BBI;

  // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
  // we can branch to its destination directly if it's squeezed exactly in
  // between the known lower bound and Pivot - 1.
  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
  } else {
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
    WorkList.push_back(
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
    // Put Cond in a virtual register to make it available from the new blocks.
    ExportFromCurrentBlock(Cond);
  }
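  // E.g. (hypothetical): if the known lower bound W.GE is 0, the single left
  // cluster covers exactly 0..4, and Pivot is 5, then Value < 5 already
  // implies a match, so we branch straight to the cluster's destination
  // without creating a new block.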

  // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
  // single cluster, RHS.Low == Pivot, and we can branch to its destination
  // directly if RHS.High equals the current upper bound.
  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
      W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
  } else {
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
    WorkList.push_back(
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
    // Put Cond in a virtual register to make it available from the new blocks.
    ExportFromCurrentBlock(Cond);
  }

  // Create the CaseBlock record that will be used to lower the branch.
  CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
               getCurSDLoc(), LeftProb, RightProb);

  if (W.MBB == SwitchMBB)
    visitSwitchCase(CB, SwitchMBB);
  else
    SwitchCases.push_back(CB);
}

// Scale CaseProb after peeling a case with probability PeeledCaseProb from
// the switch statement.
static BranchProbability scaleCaseProbability(BranchProbability CaseProb,
                                              BranchProbability PeeledCaseProb) {
  if (PeeledCaseProb == BranchProbability::getOne())
    return BranchProbability::getZero();
  BranchProbability SwitchProb = PeeledCaseProb.getCompl();

  uint32_t Numerator = CaseProb.getNumerator();
  uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
  return BranchProbability(Numerator, std::max(Numerator, Denominator));
}
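// Worked example with hypothetical probabilities: peeling a case with
// probability 3/4 leaves SwitchProb = 1/4, so a remaining case that had
// probability 1/8 of the whole switch is rescaled to (1/8) / (1/4) = 1/2 of
// the peeled-off switch. The std::max guard only clamps the result to one if
// rounding in scale() would make the numerator exceed the denominator.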

// Try to peel the top probability case if it exceeds the threshold.
// Return the current MachineBasicBlock for the switch statement if peeling
// does not occur.
// If peeling is performed, return the newly created MachineBasicBlock
// for the peeled switch statement. Also update Clusters to remove the peeled
// case. PeeledCaseProb is the BranchProbability for the peeled case.
MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
    const SwitchInst &SI, CaseClusterVector &Clusters,
    BranchProbability &PeeledCaseProb) {
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  // Don't peel if there is only one cluster or when optimizing for size.
  if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
      TM.getOptLevel() == CodeGenOpt::None ||
      SwitchMBB->getParent()->getFunction().optForMinSize())
    return SwitchMBB;

  BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
  unsigned PeeledCaseIndex = 0;
  bool SwitchPeeled = false;
  for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
    CaseCluster &CC = Clusters[Index];
    if (CC.Prob < TopCaseProb)
      continue;
    TopCaseProb = CC.Prob;
    PeeledCaseIndex = Index;
    SwitchPeeled = true;
  }
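  // E.g. (hypothetical): with SwitchPeelThreshold at 66, only a cluster
  // carrying at least 66% of the total probability is selected; on ties, the
  // scan keeps the last qualifying cluster it sees.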
  if (!SwitchPeeled)
    return SwitchMBB;

  LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
                    << TopCaseProb << "\n");

  // Record the MBB for the peeled switch statement.
  MachineFunction::iterator BBI(SwitchMBB);
  ++BBI;
  MachineBasicBlock *PeeledSwitchMBB =
      FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
  FuncInfo.MF->insert(BBI, PeeledSwitchMBB);

  ExportFromCurrentBlock(SI.getCondition());
  auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
  SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
                          nullptr,   nullptr,      TopCaseProb.getCompl()};
  lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);

  Clusters.erase(PeeledCaseIt);
  for (CaseCluster &CC : Clusters) {
    LLVM_DEBUG(
        dbgs() << "Scale the probability for one cluster, before scaling: "
               << CC.Prob << "\n");
    CC.Prob = scaleCaseProbability(CC.Prob, TopCaseProb);
    LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
  }
  PeeledCaseProb = TopCaseProb;
  return PeeledSwitchMBB;
}

void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
  // Extract cases from the switch.
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto I : SI.cases()) {
    MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }
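  // E.g. (hypothetical): without BranchProbabilityInfo, a switch with three
  // cases gives each case edge probability 1/4, implicitly leaving the
  // remaining 1/4 for the default destination.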

  MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  if (TM.getOptLevel() != CodeGenOpt::None) {
    // Replace an unreachable default with the most popular destination.
    // FIXME: Exploit unreachable default more aggressively.
    bool UnreachableDefault =
        isa<UnreachableInst>(SI.getDefaultDest()->getFirstNonPHIOrDbg());
    if (UnreachableDefault && !Clusters.empty()) {
      DenseMap<const BasicBlock *, unsigned> Popularity;
      unsigned MaxPop = 0;
      const BasicBlock *MaxBB = nullptr;
      for (auto I : SI.cases()) {
        const BasicBlock *BB = I.getCaseSuccessor();
        if (++Popularity[BB] > MaxPop) {
          MaxPop = Popularity[BB];
          MaxBB = BB;
        }
      }
      // Set new default.
      assert(MaxPop > 0 && MaxBB);
      DefaultMBB = FuncInfo.MBBMap[MaxBB];

      // Remove cases that were pointing to the destination that is now the
      // default.
      CaseClusterVector New;
      New.reserve(Clusters.size());
      for (CaseCluster &CC : Clusters) {
        if (CC.MBB != DefaultMBB)
          New.push_back(CC);
      }
      Clusters = std::move(New);
    }
  }
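  // E.g. (hypothetical): if the default block is unreachable and the cases
  // are {1, 2} -> A and {3} -> B, then A is the most popular destination; it
  // becomes the new default, the cluster for A is dropped, and only
  // {3} -> B remains.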

  // The branch probability of the peeled case.
  BranchProbability PeeledCaseProb = BranchProbability::getZero();
  MachineBasicBlock *PeeledSwitchMBB =
      peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);

  // If there is only the default destination, jump there directly.
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  if (Clusters.empty()) {
    assert(PeeledSwitchMBB == SwitchMBB);
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != NextBlock(SwitchMBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
    }
    return;
  }

  findJumpTables(Clusters, &SI, DefaultMBB);
  findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
  // Scale the branch probability for DefaultMBB if peeling occurred and
  // DefaultMBB was not replaced.
  if (PeeledCaseProb != BranchProbability::getZero() &&
      DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
    DefaultProb = scaleCaseProbability(DefaultProb, PeeledCaseProb);
  WorkList.push_back(
      {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;

    if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
        !DefaultMBB->getParent()->getFunction().optForMinSize()) {
      // For optimized builds, lower a large range as a balanced binary tree.
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
      continue;
    }

    lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
  }
}