1 //===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements routines for translating from LLVM IR into SelectionDAG IR.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "SelectionDAGBuilder.h"
14 #include "SDNodeDbgValue.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/ADT/Triple.h"
28 #include "llvm/ADT/Twine.h"
29 #include "llvm/Analysis/AliasAnalysis.h"
30 #include "llvm/Analysis/BlockFrequencyInfo.h"
31 #include "llvm/Analysis/BranchProbabilityInfo.h"
32 #include "llvm/Analysis/ConstantFolding.h"
33 #include "llvm/Analysis/EHPersonalities.h"
34 #include "llvm/Analysis/Loads.h"
35 #include "llvm/Analysis/MemoryLocation.h"
36 #include "llvm/Analysis/ProfileSummaryInfo.h"
37 #include "llvm/Analysis/TargetLibraryInfo.h"
38 #include "llvm/Analysis/ValueTracking.h"
39 #include "llvm/Analysis/VectorUtils.h"
40 #include "llvm/CodeGen/Analysis.h"
41 #include "llvm/CodeGen/FunctionLoweringInfo.h"
42 #include "llvm/CodeGen/GCMetadata.h"
43 #include "llvm/CodeGen/ISDOpcodes.h"
44 #include "llvm/CodeGen/MachineBasicBlock.h"
45 #include "llvm/CodeGen/MachineFrameInfo.h"
46 #include "llvm/CodeGen/MachineFunction.h"
47 #include "llvm/CodeGen/MachineInstr.h"
48 #include "llvm/CodeGen/MachineInstrBuilder.h"
49 #include "llvm/CodeGen/MachineJumpTableInfo.h"
50 #include "llvm/CodeGen/MachineMemOperand.h"
51 #include "llvm/CodeGen/MachineModuleInfo.h"
52 #include "llvm/CodeGen/MachineOperand.h"
53 #include "llvm/CodeGen/MachineRegisterInfo.h"
54 #include "llvm/CodeGen/RuntimeLibcalls.h"
55 #include "llvm/CodeGen/SelectionDAG.h"
56 #include "llvm/CodeGen/SelectionDAGNodes.h"
57 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
58 #include "llvm/CodeGen/StackMaps.h"
59 #include "llvm/CodeGen/SwiftErrorValueTracking.h"
60 #include "llvm/CodeGen/TargetFrameLowering.h"
61 #include "llvm/CodeGen/TargetInstrInfo.h"
62 #include "llvm/CodeGen/TargetLowering.h"
63 #include "llvm/CodeGen/TargetOpcodes.h"
64 #include "llvm/CodeGen/TargetRegisterInfo.h"
65 #include "llvm/CodeGen/TargetSubtargetInfo.h"
66 #include "llvm/CodeGen/ValueTypes.h"
67 #include "llvm/CodeGen/WinEHFuncInfo.h"
68 #include "llvm/IR/Argument.h"
69 #include "llvm/IR/Attributes.h"
70 #include "llvm/IR/BasicBlock.h"
71 #include "llvm/IR/CFG.h"
72 #include "llvm/IR/CallingConv.h"
73 #include "llvm/IR/Constant.h"
74 #include "llvm/IR/ConstantRange.h"
75 #include "llvm/IR/Constants.h"
76 #include "llvm/IR/DataLayout.h"
77 #include "llvm/IR/DebugInfoMetadata.h"
78 #include "llvm/IR/DebugLoc.h"
79 #include "llvm/IR/DerivedTypes.h"
80 #include "llvm/IR/Function.h"
81 #include "llvm/IR/GetElementPtrTypeIterator.h"
82 #include "llvm/IR/InlineAsm.h"
83 #include "llvm/IR/InstrTypes.h"
84 #include "llvm/IR/Instruction.h"
85 #include "llvm/IR/Instructions.h"
86 #include "llvm/IR/IntrinsicInst.h"
87 #include "llvm/IR/Intrinsics.h"
88 #include "llvm/IR/IntrinsicsAArch64.h"
89 #include "llvm/IR/IntrinsicsWebAssembly.h"
90 #include "llvm/IR/LLVMContext.h"
91 #include "llvm/IR/Metadata.h"
92 #include "llvm/IR/Module.h"
93 #include "llvm/IR/Operator.h"
94 #include "llvm/IR/PatternMatch.h"
95 #include "llvm/IR/Statepoint.h"
96 #include "llvm/IR/Type.h"
97 #include "llvm/IR/User.h"
98 #include "llvm/IR/Value.h"
99 #include "llvm/MC/MCContext.h"
100 #include "llvm/MC/MCSymbol.h"
101 #include "llvm/Support/AtomicOrdering.h"
102 #include "llvm/Support/BranchProbability.h"
103 #include "llvm/Support/Casting.h"
104 #include "llvm/Support/CodeGen.h"
105 #include "llvm/Support/CommandLine.h"
106 #include "llvm/Support/Compiler.h"
107 #include "llvm/Support/Debug.h"
108 #include "llvm/Support/ErrorHandling.h"
109 #include "llvm/Support/MachineValueType.h"
110 #include "llvm/Support/MathExtras.h"
111 #include "llvm/Support/raw_ostream.h"
112 #include "llvm/Target/TargetIntrinsicInfo.h"
113 #include "llvm/Target/TargetMachine.h"
114 #include "llvm/Target/TargetOptions.h"
115 #include "llvm/Transforms/Utils/Local.h"
116 #include <algorithm>
117 #include <cassert>
118 #include <cstddef>
119 #include <cstdint>
120 #include <cstring>
121 #include <iterator>
122 #include <limits>
123 #include <numeric>
124 #include <tuple>
125 #include <utility>
126 #include <vector>
127 
128 using namespace llvm;
129 using namespace PatternMatch;
130 using namespace SwitchCG;
131 
132 #define DEBUG_TYPE "isel"
133 
134 /// LimitFloatPrecision - Generate low-precision inline sequences for
135 /// some float libcalls (6, 8 or 12 bits).
136 static unsigned LimitFloatPrecision;
137 
138 static cl::opt<unsigned, true>
139     LimitFPPrecision("limit-float-precision",
140                      cl::desc("Generate low-precision inline sequences "
141                               "for some float libcalls"),
142                      cl::location(LimitFloatPrecision), cl::Hidden,
143                      cl::init(0));
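// For example, passing -limit-float-precision=12 (e.g. to llc) enables the
// reduced-precision inline expansions guarded by this value later in this
// file; the default of 0 leaves the normal lowering unchanged.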
144 
145 static cl::opt<unsigned> SwitchPeelThreshold(
146     "switch-peel-threshold", cl::Hidden, cl::init(66),
147     cl::desc("Set the case probability threshold for peeling the case from a "
148              "switch statement. A value greater than 100 will void this "
149              "optimization"));
150 
151 // Limit the width of DAG chains. This is important in general to prevent
152 // DAG-based analysis from blowing up. For example, alias analysis and
153 // load clustering may not complete in reasonable time. It is difficult to
154 // recognize and avoid this situation within each individual analysis, and
155 // future analyses are likely to have the same behavior. Limiting DAG width is
156 // the safe approach and will be especially important with global DAGs.
157 //
158 // MaxParallelChains default is arbitrarily high to avoid affecting
159 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
160 // sequence over this should have been converted to llvm.memcpy by the
161 // frontend. It is easy to induce this behavior with .ll code such as:
162 // %buffer = alloca [4096 x i8]
163 // %data = load [4096 x i8]* %argPtr
164 // store [4096 x i8] %data, [4096 x i8]* %buffer
165 static const unsigned MaxParallelChains = 64;
166 
167 // Return the calling convention if the Value passed requires ABI mangling as it
168 // is a parameter to a function or a return value from a function which is not
169 // an intrinsic.
170 static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
171   if (auto *R = dyn_cast<ReturnInst>(V))
172     return R->getParent()->getParent()->getCallingConv();
173 
174   if (auto *CI = dyn_cast<CallInst>(V)) {
175     const bool IsInlineAsm = CI->isInlineAsm();
176     const bool IsIndirectFunctionCall =
177         !IsInlineAsm && !CI->getCalledFunction();
178 
179     // It is possible that the call instruction is an inline asm statement or an
180     // indirect function call in which case the return value of
181     // getCalledFunction() would be nullptr.
182         const bool IsIntrinsicCall =
183         !IsInlineAsm && !IsIndirectFunctionCall &&
184         CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;
185 
186     if (!IsInlineAsm && !IsIntrinsicCall)
187       return CI->getCallingConv();
188   }
189 
190   return None;
191 }
192 
193 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
194                                       const SDValue *Parts, unsigned NumParts,
195                                       MVT PartVT, EVT ValueVT, const Value *V,
196                                       Optional<CallingConv::ID> CC);
197 
198 /// getCopyFromParts - Create a value that contains the specified legal parts
199 /// combined into the value they represent.  If the parts combine to a type
200 /// larger than ValueVT then AssertOp can be used to specify whether the extra
201 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
202 /// (ISD::AssertSext).
203 static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
204                                 const SDValue *Parts, unsigned NumParts,
205                                 MVT PartVT, EVT ValueVT, const Value *V,
206                                 Optional<CallingConv::ID> CC = None,
207                                 Optional<ISD::NodeType> AssertOp = None) {
208   if (ValueVT.isVector())
209     return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
210                                   CC);
211 
212   assert(NumParts > 0 && "No parts to assemble!");
213   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
214   SDValue Val = Parts[0];
215 
216   if (NumParts > 1) {
217     // Assemble the value from multiple parts.
218     if (ValueVT.isInteger()) {
219       unsigned PartBits = PartVT.getSizeInBits();
220       unsigned ValueBits = ValueVT.getSizeInBits();
221 
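      // For example, assembling an i96 value from three i32 parts: RoundParts
      // is 2 and RoundBits is 64 below, so Parts[0..1] are combined into an
      // i64, and the remaining odd i32 part is any-extended, shifted left by
      // 64 and OR'd in to form the full 96-bit value.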
222       // Assemble the power of 2 part.
223       unsigned RoundParts =
224           (NumParts & (NumParts - 1)) ? 1 << Log2_32(NumParts) : NumParts;
225       unsigned RoundBits = PartBits * RoundParts;
226       EVT RoundVT = RoundBits == ValueBits ?
227         ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
228       SDValue Lo, Hi;
229 
230       EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
231 
232       if (RoundParts > 2) {
233         Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
234                               PartVT, HalfVT, V);
235         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
236                               RoundParts / 2, PartVT, HalfVT, V);
237       } else {
238         Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
239         Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
240       }
241 
242       if (DAG.getDataLayout().isBigEndian())
243         std::swap(Lo, Hi);
244 
245       Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
246 
247       if (RoundParts < NumParts) {
248         // Assemble the trailing non-power-of-2 part.
249         unsigned OddParts = NumParts - RoundParts;
250         EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
251         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
252                               OddVT, V, CC);
253 
254         // Combine the round and odd parts.
255         Lo = Val;
256         if (DAG.getDataLayout().isBigEndian())
257           std::swap(Lo, Hi);
258         EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
259         Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
260         Hi =
261             DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
262                         DAG.getConstant(Lo.getValueSizeInBits(), DL,
263                                         TLI.getPointerTy(DAG.getDataLayout())));
264         Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
265         Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
266       }
267     } else if (PartVT.isFloatingPoint()) {
268       // FP split into multiple FP parts (for ppcf128)
269       assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
270              "Unexpected split");
271       SDValue Lo, Hi;
272       Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
273       Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
274       if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
275         std::swap(Lo, Hi);
276       Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
277     } else {
278       // FP split into integer parts (soft fp)
279       assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
280              !PartVT.isVector() && "Unexpected split");
281       EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
282       Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
283     }
284   }
285 
286   // There is now one part, held in Val.  Correct it to match ValueVT.
287   // PartEVT is the type of the register class that holds the value.
288   // ValueVT is the type of the inline asm operation.
289   EVT PartEVT = Val.getValueType();
290 
291   if (PartEVT == ValueVT)
292     return Val;
293 
294   if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
295       ValueVT.bitsLT(PartEVT)) {
296     // For an FP value in an integer part, we need to truncate to the right
297     // width first.
298     PartEVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
299     Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
300   }
301 
302   // Handle types that have the same size.
303   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
304     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
305 
306   // Handle types with different sizes.
307   if (PartEVT.isInteger() && ValueVT.isInteger()) {
308     if (ValueVT.bitsLT(PartEVT)) {
309       // For a truncate, see if we have any information to
310       // indicate whether the truncated bits are known to be zero or whether
311       // the value is sign-extended from ValueVT.
312       if (AssertOp.hasValue())
313         Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
314                           DAG.getValueType(ValueVT));
315       return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
316     }
317     return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
318   }
319 
320   if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
321     // FP_ROUND's are always exact here.
322     if (ValueVT.bitsLT(Val.getValueType()))
323       return DAG.getNode(
324           ISD::FP_ROUND, DL, ValueVT, Val,
325           DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
326 
327     return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
328   }
329 
330   // Handle MMX to a narrower integer type by bitcasting MMX to integer and
331   // then truncating.
332   if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
333       ValueVT.bitsLT(PartEVT)) {
334     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
335     return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
336   }
337 
338   report_fatal_error("Unknown mismatch in getCopyFromParts!");
339 }
340 
341 static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
342                                               const Twine &ErrMsg) {
343   const Instruction *I = dyn_cast_or_null<Instruction>(V);
344   if (!I)
345     return Ctx.emitError(ErrMsg);
346 
347   const char *AsmError = ", possible invalid constraint for vector type";
348   if (const CallInst *CI = dyn_cast<CallInst>(I))
349     if (CI->isInlineAsm())
350       return Ctx.emitError(I, ErrMsg + AsmError);
351 
352   return Ctx.emitError(I, ErrMsg);
353 }
354 
355 /// getCopyFromPartsVector - Create a value that contains the specified legal
356 /// parts combined into the value they represent.  If the parts combine to a
357 /// type larger than ValueVT then AssertOp can be used to specify whether the
358 /// extra bits are known to be zero (ISD::AssertZext) or sign extended from
359 /// ValueVT (ISD::AssertSext).
360 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
361                                       const SDValue *Parts, unsigned NumParts,
362                                       MVT PartVT, EVT ValueVT, const Value *V,
363                                       Optional<CallingConv::ID> CallConv) {
364   assert(ValueVT.isVector() && "Not a vector value");
365   assert(NumParts > 0 && "No parts to assemble!");
366   const bool IsABIRegCopy = CallConv.hasValue();
367 
368   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
369   SDValue Val = Parts[0];
370 
371   // Handle a multi-element vector.
372   if (NumParts > 1) {
373     EVT IntermediateVT;
374     MVT RegisterVT;
375     unsigned NumIntermediates;
376     unsigned NumRegs;
377 
378     if (IsABIRegCopy) {
379       NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
380           *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
381           NumIntermediates, RegisterVT);
382     } else {
383       NumRegs =
384           TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
385                                      NumIntermediates, RegisterVT);
386     }
387 
388     assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
389     NumParts = NumRegs; // Silence a compiler warning.
390     assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
391     assert(RegisterVT.getSizeInBits() ==
392            Parts[0].getSimpleValueType().getSizeInBits() &&
393            "Part type sizes don't match!");
394 
395     // Assemble the parts into intermediate operands.
396     SmallVector<SDValue, 8> Ops(NumIntermediates);
397     if (NumIntermediates == NumParts) {
398       // If the register was not expanded, truncate or copy the value,
399       // as appropriate.
400       for (unsigned i = 0; i != NumParts; ++i)
401         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
402                                   PartVT, IntermediateVT, V);
403     } else if (NumParts > 0) {
404       // If the intermediate type was expanded, build the intermediate
405       // operands from the parts.
406       assert(NumParts % NumIntermediates == 0 &&
407              "Must expand into a divisible number of parts!");
408       unsigned Factor = NumParts / NumIntermediates;
409       for (unsigned i = 0; i != NumIntermediates; ++i)
410         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
411                                   PartVT, IntermediateVT, V);
412     }
413 
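    // For example, if ValueVT is <8 x i16> and the target breaks it down into
    // two <4 x i16> registers, NumIntermediates == 2 and the two <4 x i16>
    // operands are concatenated back into an <8 x i16> below.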
414     // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
415     // intermediate operands.
416     EVT BuiltVectorTy =
417         IntermediateVT.isVector()
418             ? EVT::getVectorVT(
419                   *DAG.getContext(), IntermediateVT.getScalarType(),
420                   IntermediateVT.getVectorElementCount() * NumParts)
421             : EVT::getVectorVT(*DAG.getContext(),
422                                IntermediateVT.getScalarType(),
423                                NumIntermediates);
424     Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
425                                                 : ISD::BUILD_VECTOR,
426                       DL, BuiltVectorTy, Ops);
427   }
428 
429   // There is now one part, held in Val.  Correct it to match ValueVT.
430   EVT PartEVT = Val.getValueType();
431 
432   if (PartEVT == ValueVT)
433     return Val;
434 
435   if (PartEVT.isVector()) {
436     // If the element types of the source/dest vectors are the same, but the
437     // parts vector has more elements than the value vector, then we have a
438     // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
439     // elements we want.
440     if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
441       assert((PartEVT.getVectorElementCount().Min >
442               ValueVT.getVectorElementCount().Min) &&
443              (PartEVT.getVectorElementCount().Scalable ==
444               ValueVT.getVectorElementCount().Scalable) &&
445              "Cannot narrow, it would be a lossy transformation");
446       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
447                          DAG.getVectorIdxConstant(0, DL));
448     }
449 
450     // Vector/Vector bitcast.
451     if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
452       return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
453 
454     assert(PartEVT.getVectorElementCount() == ValueVT.getVectorElementCount() &&
455       "Cannot handle this kind of promotion");
456     // Promoted vector extract
457     return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
458 
459   }
460 
461   // Trivial bitcast if the types are the same size and the destination
462   // vector type is legal.
463   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
464       TLI.isTypeLegal(ValueVT))
465     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
466 
467   if (ValueVT.getVectorNumElements() != 1) {
468      // Certain ABIs require that vectors are passed as integers. For vectors
469      // that are the same size, this is an obvious bitcast.
470      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
471        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
472      } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
473        // Bitcast Val to a wider vector with the value's element type and
474        // extract the subvector we want.
475        unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
476        EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
477                                            ValueVT.getVectorElementType(), Elts);
478        Val = DAG.getBitcast(WiderVecType, Val);
479        return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
480                           DAG.getVectorIdxConstant(0, DL));
481      }
482 
483      diagnosePossiblyInvalidConstraint(
484          *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
485      return DAG.getUNDEF(ValueVT);
486   }
487 
488   // Handle cases such as i8 -> <1 x i1>
489   EVT ValueSVT = ValueVT.getVectorElementType();
490   if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
491     if (ValueSVT.getSizeInBits() == PartEVT.getSizeInBits())
492       Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
493     else
494       Val = ValueVT.isFloatingPoint()
495                 ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
496                 : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
497   }
498 
499   return DAG.getBuildVector(ValueVT, DL, Val);
500 }
501 
502 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
503                                  SDValue Val, SDValue *Parts, unsigned NumParts,
504                                  MVT PartVT, const Value *V,
505                                  Optional<CallingConv::ID> CallConv);
506 
507 /// getCopyToParts - Create a series of nodes that contain the specified value
508 /// split into legal parts.  If the parts contain more bits than Val, then, for
509 /// integers, ExtendKind can be used to specify how to generate the extra bits.
510 static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
511                            SDValue *Parts, unsigned NumParts, MVT PartVT,
512                            const Value *V,
513                            Optional<CallingConv::ID> CallConv = None,
514                            ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
515   EVT ValueVT = Val.getValueType();
516 
517   // Handle the vector case separately.
518   if (ValueVT.isVector())
519     return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
520                                 CallConv);
521 
522   unsigned PartBits = PartVT.getSizeInBits();
523   unsigned OrigNumParts = NumParts;
524   assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
525          "Copying to an illegal type!");
526 
527   if (NumParts == 0)
528     return;
529 
530   assert(!ValueVT.isVector() && "Vector case handled elsewhere");
531   EVT PartEVT = PartVT;
532   if (PartEVT == ValueVT) {
533     assert(NumParts == 1 && "No-op copy with multiple parts!");
534     Parts[0] = Val;
535     return;
536   }
537 
538   if (NumParts * PartBits > ValueVT.getSizeInBits()) {
539     // If the parts cover more bits than the value has, promote the value.
540     if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
541       assert(NumParts == 1 && "Do not know what to promote to!");
542       Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
543     } else {
544       if (ValueVT.isFloatingPoint()) {
545         // FP values need to be bitcast, then extended if they are being put
546         // into a larger container.
547         ValueVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
548         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
549       }
550       assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
551              ValueVT.isInteger() &&
552              "Unknown mismatch!");
553       ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
554       Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
555       if (PartVT == MVT::x86mmx)
556         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
557     }
558   } else if (PartBits == ValueVT.getSizeInBits()) {
559     // Different types of the same size.
560     assert(NumParts == 1 && PartEVT != ValueVT);
561     Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
562   } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
563     // If the parts cover fewer bits than the value has, truncate the value.
564     assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
565            ValueVT.isInteger() &&
566            "Unknown mismatch!");
567     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
568     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
569     if (PartVT == MVT::x86mmx)
570       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
571   }
572 
573   // The value may have changed - recompute ValueVT.
574   ValueVT = Val.getValueType();
575   assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
576          "Failed to tile the value with PartVT!");
577 
578   if (NumParts == 1) {
579     if (PartEVT != ValueVT) {
580       diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
581                                         "scalar-to-vector conversion failed");
582       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
583     }
584 
585     Parts[0] = Val;
586     return;
587   }
588 
589   // Expand the value into multiple parts.
590   if (NumParts & (NumParts - 1)) {
591     // The number of parts is not a power of 2.  Split off and copy the tail.
592     assert(PartVT.isInteger() && ValueVT.isInteger() &&
593            "Do not know what to expand to!");
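    // For example, splitting an i96 value into three i32 parts: RoundParts is
    // 2, so the high 32 bits are shifted down and copied out as the odd tail
    // first, then the value is truncated to i64 and bisected by the
    // power-of-2 expansion below.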
594     unsigned RoundParts = 1 << Log2_32(NumParts);
595     unsigned RoundBits = RoundParts * PartBits;
596     unsigned OddParts = NumParts - RoundParts;
597     SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
598       DAG.getShiftAmountConstant(RoundBits, ValueVT, DL, /*LegalTypes*/false));
599 
600     getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
601                    CallConv);
602 
603     if (DAG.getDataLayout().isBigEndian())
604       // The odd parts were reversed by getCopyToParts - unreverse them.
605       std::reverse(Parts + RoundParts, Parts + NumParts);
606 
607     NumParts = RoundParts;
608     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
609     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
610   }
611 
612   // The number of parts is a power of 2.  Repeatedly bisect the value using
613   // EXTRACT_ELEMENT.
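  // For example, an i128 value destined for four i32 parts is first split into
  // two i64 halves (Parts[0] and Parts[2]), and each half is then split again
  // into two i32 parts on the next iteration.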
614   Parts[0] = DAG.getNode(ISD::BITCAST, DL,
615                          EVT::getIntegerVT(*DAG.getContext(),
616                                            ValueVT.getSizeInBits()),
617                          Val);
618 
619   for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
620     for (unsigned i = 0; i < NumParts; i += StepSize) {
621       unsigned ThisBits = StepSize * PartBits / 2;
622       EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
623       SDValue &Part0 = Parts[i];
624       SDValue &Part1 = Parts[i+StepSize/2];
625 
626       Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
627                           ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
628       Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
629                           ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
630 
631       if (ThisBits == PartBits && ThisVT != PartVT) {
632         Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
633         Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
634       }
635     }
636   }
637 
638   if (DAG.getDataLayout().isBigEndian())
639     std::reverse(Parts, Parts + OrigNumParts);
640 }
641 
642 static SDValue widenVectorToPartType(SelectionDAG &DAG,
643                                      SDValue Val, const SDLoc &DL, EVT PartVT) {
644   if (!PartVT.isVector())
645     return SDValue();
646 
647   EVT ValueVT = Val.getValueType();
648   unsigned PartNumElts = PartVT.getVectorNumElements();
649   unsigned ValueNumElts = ValueVT.getVectorNumElements();
650   if (PartNumElts > ValueNumElts &&
651       PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
652     EVT ElementVT = PartVT.getVectorElementType();
653     // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
654     // undef elements.
655     SmallVector<SDValue, 16> Ops;
656     DAG.ExtractVectorElements(Val, Ops);
657     SDValue EltUndef = DAG.getUNDEF(ElementVT);
658     for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
659       Ops.push_back(EltUndef);
660 
661     // FIXME: Use CONCAT for 2x -> 4x.
662     return DAG.getBuildVector(PartVT, DL, Ops);
663   }
664 
665   return SDValue();
666 }
667 
668 /// getCopyToPartsVector - Create a series of nodes that contain the specified
669 /// value split into legal parts.
670 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
671                                  SDValue Val, SDValue *Parts, unsigned NumParts,
672                                  MVT PartVT, const Value *V,
673                                  Optional<CallingConv::ID> CallConv) {
674   EVT ValueVT = Val.getValueType();
675   assert(ValueVT.isVector() && "Not a vector");
676   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
677   const bool IsABIRegCopy = CallConv.hasValue();
678 
679   if (NumParts == 1) {
680     EVT PartEVT = PartVT;
681     if (PartEVT == ValueVT) {
682       // Nothing to do.
683     } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
684       // Bitconvert vector->vector case.
685       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
686     } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
687       Val = Widened;
688     } else if (PartVT.isVector() &&
689                PartEVT.getVectorElementType().bitsGE(
690                  ValueVT.getVectorElementType()) &&
691                PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
692 
693       // Promoted vector extract
694       Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
695     } else {
696       if (ValueVT.getVectorNumElements() == 1) {
697         Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
698                           DAG.getVectorIdxConstant(0, DL));
699       } else {
700         assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
701                "lossy conversion of vector to scalar type");
702         EVT IntermediateType =
703             EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
704         Val = DAG.getBitcast(IntermediateType, Val);
705         Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
706       }
707     }
708 
709     assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
710     Parts[0] = Val;
711     return;
712   }
713 
714   // Handle a multi-element vector.
715   EVT IntermediateVT;
716   MVT RegisterVT;
717   unsigned NumIntermediates;
718   unsigned NumRegs;
719   if (IsABIRegCopy) {
720     NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
721         *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
722         NumIntermediates, RegisterVT);
723   } else {
724     NumRegs =
725         TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
726                                    NumIntermediates, RegisterVT);
727   }
728 
729   assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
730   NumParts = NumRegs; // Silence a compiler warning.
731   assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
732 
733   unsigned IntermediateNumElts = IntermediateVT.isVector() ?
734     IntermediateVT.getVectorNumElements() : 1;
735 
736   // Convert the vector to the appropriate type if necessary.
737   auto DestEltCnt = ElementCount(NumIntermediates * IntermediateNumElts,
738                                  ValueVT.isScalableVector());
739   EVT BuiltVectorTy = EVT::getVectorVT(
740       *DAG.getContext(), IntermediateVT.getScalarType(), DestEltCnt);
741   if (ValueVT != BuiltVectorTy) {
742     if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
743       Val = Widened;
744 
745     Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
746   }
747 
748   // Split the vector into intermediate operands.
749   SmallVector<SDValue, 8> Ops(NumIntermediates);
750   for (unsigned i = 0; i != NumIntermediates; ++i) {
751     if (IntermediateVT.isVector()) {
752       Ops[i] =
753           DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
754                       DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
755     } else {
756       Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
757                            DAG.getVectorIdxConstant(i, DL));
758     }
759   }
760 
761   // Split the intermediate operands into legal parts.
762   if (NumParts == NumIntermediates) {
763     // If the register was not expanded, promote or copy the value,
764     // as appropriate.
765     for (unsigned i = 0; i != NumParts; ++i)
766       getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
767   } else if (NumParts > 0) {
768     // If the intermediate type was expanded, split each intermediate value into
769     // legal parts.
770     assert(NumIntermediates != 0 && "division by zero");
771     assert(NumParts % NumIntermediates == 0 &&
772            "Must expand into a divisible number of parts!");
773     unsigned Factor = NumParts / NumIntermediates;
774     for (unsigned i = 0; i != NumIntermediates; ++i)
775       getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
776                      CallConv);
777   }
778 }
779 
780 RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
781                            EVT valuevt, Optional<CallingConv::ID> CC)
782     : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
783       RegCount(1, regs.size()), CallConv(CC) {}
784 
785 RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
786                            const DataLayout &DL, unsigned Reg, Type *Ty,
787                            Optional<CallingConv::ID> CC) {
788   ComputeValueVTs(TLI, DL, Ty, ValueVTs);
789 
790   CallConv = CC;
791 
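  // For example, an i64 value on a target with 32-bit registers yields
  // ValueVTs = {i64}, two consecutive registers starting at Reg, and
  // RegVTs = {i32} with RegCount = {2}.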
792   for (EVT ValueVT : ValueVTs) {
793     unsigned NumRegs =
794         isABIMangled()
795             ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
796             : TLI.getNumRegisters(Context, ValueVT);
797     MVT RegisterVT =
798         isABIMangled()
799             ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
800             : TLI.getRegisterType(Context, ValueVT);
801     for (unsigned i = 0; i != NumRegs; ++i)
802       Regs.push_back(Reg + i);
803     RegVTs.push_back(RegisterVT);
804     RegCount.push_back(NumRegs);
805     Reg += NumRegs;
806   }
807 }
808 
809 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
810                                       FunctionLoweringInfo &FuncInfo,
811                                       const SDLoc &dl, SDValue &Chain,
812                                       SDValue *Flag, const Value *V) const {
813   // A Value with type {} or [0 x %t] needs no registers.
814   if (ValueVTs.empty())
815     return SDValue();
816 
817   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
818 
819   // Assemble the legal parts into the final values.
820   SmallVector<SDValue, 4> Values(ValueVTs.size());
821   SmallVector<SDValue, 8> Parts;
822   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
823     // Copy the legal parts from the registers.
824     EVT ValueVT = ValueVTs[Value];
825     unsigned NumRegs = RegCount[Value];
826     MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
827                                           *DAG.getContext(),
828                                           CallConv.getValue(), RegVTs[Value])
829                                     : RegVTs[Value];
830 
831     Parts.resize(NumRegs);
832     for (unsigned i = 0; i != NumRegs; ++i) {
833       SDValue P;
834       if (!Flag) {
835         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
836       } else {
837         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
838         *Flag = P.getValue(2);
839       }
840 
841       Chain = P.getValue(1);
842       Parts[i] = P;
843 
844       // If the source register was virtual and if we know something about it,
845       // add an assert node.
846       if (!Register::isVirtualRegister(Regs[Part + i]) ||
847           !RegisterVT.isInteger())
848         continue;
849 
850       const FunctionLoweringInfo::LiveOutInfo *LOI =
851         FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
852       if (!LOI)
853         continue;
854 
855       unsigned RegSize = RegisterVT.getScalarSizeInBits();
856       unsigned NumSignBits = LOI->NumSignBits;
857       unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
858 
859       if (NumZeroBits == RegSize) {
860         // The current value is zero.
861         // Express that explicitly, as it makes it easier for
862         // optimizations to kick in.
863         Parts[i] = DAG.getConstant(0, dl, RegisterVT);
864         continue;
865       }
866 
867       // FIXME: We capture more information than the dag can represent.  For
868       // now, just use the tightest assertzext/assertsext possible.
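      // For example, if a 32-bit vreg is known to have 24 leading zero bits,
      // an AssertZext to i8 is attached so later combines can treat the copy
      // as a zero-extended i8.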
869       bool isSExt;
870       EVT FromVT(MVT::Other);
871       if (NumZeroBits) {
872         FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
873         isSExt = false;
874       } else if (NumSignBits > 1) {
875         FromVT =
876             EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
877         isSExt = true;
878       } else {
879         continue;
880       }
881       // Add an assertion node.
882       assert(FromVT != MVT::Other);
883       Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
884                              RegisterVT, P, DAG.getValueType(FromVT));
885     }
886 
887     Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
888                                      RegisterVT, ValueVT, V, CallConv);
889     Part += NumRegs;
890     Parts.clear();
891   }
892 
893   return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
894 }
895 
896 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
897                                  const SDLoc &dl, SDValue &Chain, SDValue *Flag,
898                                  const Value *V,
899                                  ISD::NodeType PreferredExtendType) const {
900   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
901   ISD::NodeType ExtendKind = PreferredExtendType;
902 
903   // Get the list of the value's legal parts.
904   unsigned NumRegs = Regs.size();
905   SmallVector<SDValue, 8> Parts(NumRegs);
906   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
907     unsigned NumParts = RegCount[Value];
908 
909     MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
910                                           *DAG.getContext(),
911                                           CallConv.getValue(), RegVTs[Value])
912                                     : RegVTs[Value];
913 
914     if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
915       ExtendKind = ISD::ZERO_EXTEND;
916 
917     getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
918                    NumParts, RegisterVT, V, CallConv, ExtendKind);
919     Part += NumParts;
920   }
921 
922   // Copy the parts into the registers.
923   SmallVector<SDValue, 8> Chains(NumRegs);
924   for (unsigned i = 0; i != NumRegs; ++i) {
925     SDValue Part;
926     if (!Flag) {
927       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
928     } else {
929       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
930       *Flag = Part.getValue(1);
931     }
932 
933     Chains[i] = Part.getValue(0);
934   }
935 
936     // If NumRegs > 1 and Flag is used, then the user of the last CopyToReg is
937     // flagged to it. That is, the CopyToReg nodes and the user are considered
938     // a single scheduling unit. If we created a TokenFactor and returned it as
939     // the chain, the TokenFactor would be both a predecessor (operand) of the
940     // user as well as a successor (the TF operands are flagged to the user).
941     // user as well as a successor (the TF operands are flagged to the user).
942     // c1, f1 = CopyToReg
943     // c2, f2 = CopyToReg
944     // c3     = TokenFactor c1, c2
945     // ...
946     //        = op c3, ..., f2
947     Chain = Chains[NumRegs-1];
948   else
949     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
950 }
951 
952 void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
953                                         unsigned MatchingIdx, const SDLoc &dl,
954                                         SelectionDAG &DAG,
955                                         std::vector<SDValue> &Ops) const {
956   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
957 
958   unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
959   if (HasMatching)
960     Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
961   else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
962     // Put the register class of the virtual registers in the flag word.  That
963     // way, later passes can recompute register class constraints for inline
964     // assembly as well as normal instructions.
965     // Don't do this for tied operands that can use the regclass information
966     // from the def.
967     const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
968     const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
969     Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
970   }
971 
972   SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
973   Ops.push_back(Res);
974 
975   if (Code == InlineAsm::Kind_Clobber) {
976     // Clobbers should always have a 1:1 mapping with registers, and may
977     // reference registers that have illegal (e.g. vector) types. Hence, we
978     // shouldn't try to apply any sort of splitting logic to them.
979     assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
980            "No 1:1 mapping from clobbers to regs?");
981     unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
982     (void)SP;
983     for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
984       Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
985       assert(
986           (Regs[I] != SP ||
987            DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
988           "If we clobbered the stack pointer, MFI should know about it.");
989     }
990     return;
991   }
992 
993   for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
994     unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
995     MVT RegisterVT = RegVTs[Value];
996     for (unsigned i = 0; i != NumRegs; ++i) {
997       assert(Reg < Regs.size() && "Mismatch in # registers expected");
998       unsigned TheReg = Regs[Reg++];
999       Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
1000     }
1001   }
1002 }
1003 
1004 SmallVector<std::pair<unsigned, unsigned>, 4>
1005 RegsForValue::getRegsAndSizes() const {
1006   SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
1007   unsigned I = 0;
1008   for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
1009     unsigned RegCount = std::get<0>(CountAndVT);
1010     MVT RegisterVT = std::get<1>(CountAndVT);
1011     unsigned RegisterSize = RegisterVT.getSizeInBits();
1012     for (unsigned E = I + RegCount; I != E; ++I)
1013       OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
1014   }
1015   return OutVec;
1016 }
1017 
1018 void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
1019                                const TargetLibraryInfo *li) {
1020   AA = aa;
1021   GFI = gfi;
1022   LibInfo = li;
1023   DL = &DAG.getDataLayout();
1024   Context = DAG.getContext();
1025   LPadToCallSiteMap.clear();
1026   SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
1027 }
1028 
1029 void SelectionDAGBuilder::clear() {
1030   NodeMap.clear();
1031   UnusedArgNodeMap.clear();
1032   PendingLoads.clear();
1033   PendingExports.clear();
1034   PendingConstrainedFP.clear();
1035   PendingConstrainedFPStrict.clear();
1036   CurInst = nullptr;
1037   HasTailCall = false;
1038   SDNodeOrder = LowestSDNodeOrder;
1039   StatepointLowering.clear();
1040 }
1041 
1042 void SelectionDAGBuilder::clearDanglingDebugInfo() {
1043   DanglingDebugInfoMap.clear();
1044 }
1045 
1046 // Update DAG root to include dependencies on Pending chains.
1047 SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
1048   SDValue Root = DAG.getRoot();
1049 
1050   if (Pending.empty())
1051     return Root;
1052 
1053   // Add current root to PendingChains, unless we already indirectly
1054   // depend on it.
1055   if (Root.getOpcode() != ISD::EntryToken) {
1056     unsigned i = 0, e = Pending.size();
1057     for (; i != e; ++i) {
1058       assert(Pending[i].getNode()->getNumOperands() > 1);
1059       if (Pending[i].getNode()->getOperand(0) == Root)
1060         break;  // Don't add the root if we already indirectly depend on it.
1061     }
1062 
1063     if (i == e)
1064       Pending.push_back(Root);
1065   }
1066 
1067   if (Pending.size() == 1)
1068     Root = Pending[0];
1069   else
1070     Root = DAG.getTokenFactor(getCurSDLoc(), Pending);
1071 
1072   DAG.setRoot(Root);
1073   Pending.clear();
1074   return Root;
1075 }
1076 
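// Three root accessors are maintained: getMemoryRoot() chains only the
// pending loads, getRoot() additionally folds in the pending constrained-FP
// nodes, and getControlRoot() chains the pending exports together with the
// pending fpexcept.strict nodes.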
1077 SDValue SelectionDAGBuilder::getMemoryRoot() {
1078   return updateRoot(PendingLoads);
1079 }
1080 
1081 SDValue SelectionDAGBuilder::getRoot() {
1082   // Chain up all pending constrained intrinsics together with all
1083   // pending loads, by simply appending them to PendingLoads and
1084   // then calling getMemoryRoot().
1085   PendingLoads.reserve(PendingLoads.size() +
1086                        PendingConstrainedFP.size() +
1087                        PendingConstrainedFPStrict.size());
1088   PendingLoads.append(PendingConstrainedFP.begin(),
1089                       PendingConstrainedFP.end());
1090   PendingLoads.append(PendingConstrainedFPStrict.begin(),
1091                       PendingConstrainedFPStrict.end());
1092   PendingConstrainedFP.clear();
1093   PendingConstrainedFPStrict.clear();
1094   return getMemoryRoot();
1095 }
1096 
1097 SDValue SelectionDAGBuilder::getControlRoot() {
1098   // We need to emit pending fpexcept.strict constrained intrinsics,
1099   // so append them to the PendingExports list.
1100   PendingExports.append(PendingConstrainedFPStrict.begin(),
1101                         PendingConstrainedFPStrict.end());
1102   PendingConstrainedFPStrict.clear();
1103   return updateRoot(PendingExports);
1104 }
1105 
1106 void SelectionDAGBuilder::visit(const Instruction &I) {
1107   // Set up outgoing PHI node register values before emitting the terminator.
1108   if (I.isTerminator()) {
1109     HandlePHINodesInSuccessorBlocks(I.getParent());
1110   }
1111 
1112   // Increase the SDNodeOrder if dealing with a non-debug instruction.
1113   if (!isa<DbgInfoIntrinsic>(I))
1114     ++SDNodeOrder;
1115 
1116   CurInst = &I;
1117 
1118   visit(I.getOpcode(), I);
1119 
1120   if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
1121     // ConstrainedFPIntrinsics handle their own FMF.
1122     if (!isa<ConstrainedFPIntrinsic>(&I)) {
1123       // Propagate the fast-math-flags of this IR instruction to the DAG node that
1124       // maps to this instruction.
1125       // TODO: We could handle all flags (nsw, etc) here.
1126       // TODO: If an IR instruction maps to >1 node, only the final node will have
1127       //       flags set.
1128       if (SDNode *Node = getNodeForIRValue(&I)) {
1129         SDNodeFlags IncomingFlags;
1130         IncomingFlags.copyFMF(*FPMO);
1131         if (!Node->getFlags().isDefined())
1132           Node->setFlags(IncomingFlags);
1133         else
1134           Node->intersectFlagsWith(IncomingFlags);
1135       }
1136     }
1137   }
1138 
1139   if (!I.isTerminator() && !HasTailCall &&
1140       !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
1141     CopyToExportRegsIfNeeded(&I);
1142 
1143   CurInst = nullptr;
1144 }
1145 
1146 void SelectionDAGBuilder::visitPHI(const PHINode &) {
1147   llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1148 }
1149 
1150 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1151   // Note: this doesn't use InstVisitor, because it has to work with
1152   // ConstantExpr's in addition to instructions.
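  // For example, HANDLE_INST for the Add opcode expands to:
  //   case Instruction::Add: visitAdd((const BinaryOperator&)I); break;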
1153   switch (Opcode) {
1154   default: llvm_unreachable("Unknown instruction type encountered!");
1155     // Build the switch statement using the Instruction.def file.
1156 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1157     case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1158 #include "llvm/IR/Instruction.def"
1159   }
1160 }
1161 
1162 void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1163                                                 const DIExpression *Expr) {
1164   auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1165     const DbgValueInst *DI = DDI.getDI();
1166     DIVariable *DanglingVariable = DI->getVariable();
1167     DIExpression *DanglingExpr = DI->getExpression();
1168     if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1169       LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
1170       return true;
1171     }
1172     return false;
1173   };
1174 
1175   for (auto &DDIMI : DanglingDebugInfoMap) {
1176     DanglingDebugInfoVector &DDIV = DDIMI.second;
1177 
1178     // If debug info is to be dropped, run it through final checks to see
1179     // whether it can be salvaged.
1180     for (auto &DDI : DDIV)
1181       if (isMatchingDbgValue(DDI))
1182         salvageUnresolvedDbgValue(DDI);
1183 
1184     DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
1185   }
1186 }
1187 
1188 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1189 // generate the debug data structures now that we've seen its definition.
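// For example, a dbg.value whose operand had no SDNode when the intrinsic was
// visited is recorded as dangling; once the operand's node is created, the
// SDDbgValue can be emitted and attached to it here.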
1190 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1191                                                    SDValue Val) {
1192   auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1193   if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1194     return;
1195 
1196   DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1197   for (auto &DDI : DDIV) {
1198     const DbgValueInst *DI = DDI.getDI();
1199     assert(DI && "Ill-formed DanglingDebugInfo");
1200     DebugLoc dl = DDI.getdl();
1201     unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1202     unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1203     DILocalVariable *Variable = DI->getVariable();
1204     DIExpression *Expr = DI->getExpression();
1205     assert(Variable->isValidLocationForIntrinsic(dl) &&
1206            "Expected inlined-at fields to agree");
1207     SDDbgValue *SDV;
1208     if (Val.getNode()) {
1209       // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1210       // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1211       // we couldn't resolve it directly when examining the DbgValue intrinsic
1212       // in the first place we should not be more successful here). Unless we
1213       // have some test case that proves this to be correct, we should avoid
1214       // calling EmitFuncArgumentDbgValue here.
1215       if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
1216         LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
1217                           << DbgSDNodeOrder << "] for:\n  " << *DI << "\n");
1218         LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
1219         // Increase the SDNodeOrder for the DbgValue here to make sure it is
1220         // inserted after the definition of Val when emitting the instructions
1221         // after ISel. An alternative could be to teach
1222         // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1223         LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1224                    << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1225                    << ValSDNodeOrder << "\n");
1226         SDV = getDbgValue(Val, Variable, Expr, dl,
1227                           std::max(DbgSDNodeOrder, ValSDNodeOrder));
1228         DAG.AddDbgValue(SDV, Val.getNode(), false);
1229       } else
1230         LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
1231                           << " in EmitFuncArgumentDbgValue\n");
1232     } else {
1233       LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
1234       auto Undef =
1235           UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
1236       auto SDV =
1237           DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
1238       DAG.AddDbgValue(SDV, nullptr, false);
1239     }
1240   }
1241   DDIV.clear();
1242 }
1243 
1244 void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
1245   Value *V = DDI.getDI()->getValue();
1246   DILocalVariable *Var = DDI.getDI()->getVariable();
1247   DIExpression *Expr = DDI.getDI()->getExpression();
1248   DebugLoc DL = DDI.getdl();
1249   DebugLoc InstDL = DDI.getDI()->getDebugLoc();
1250   unsigned SDOrder = DDI.getSDNodeOrder();
1251 
1252   // Currently we consider only dbg.value intrinsics -- we tell the salvager
1253   // that DW_OP_stack_value is desired.
1254   assert(isa<DbgValueInst>(DDI.getDI()));
1255   bool StackValue = true;
1256 
1257   // Can this Value be encoded without any further work?
1258   if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder))
1259     return;
1260 
1261   // Attempt to salvage back through as many instructions as possible. Bail if
1262   // a non-instruction is seen, such as a constant expression or global
1263   // variable. FIXME: Further work could recover those too.
1264   while (isa<Instruction>(V)) {
1265     Instruction &VAsInst = *cast<Instruction>(V);
1266     DIExpression *NewExpr = salvageDebugInfoImpl(VAsInst, Expr, StackValue);
1267 
1268     // If we cannot salvage any further, and haven't yet found a suitable debug
1269     // expression, bail out.
1270     if (!NewExpr)
1271       break;
1272 
1273     // New value and expr now represent this debuginfo.
1274     V = VAsInst.getOperand(0);
1275     Expr = NewExpr;
1276 
1277     // Some kind of simplification occurred: check whether the operand of the
1278     // salvaged debug expression can be encoded in this DAG.
1279     if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder)) {
1280       LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n  "
1281                         << *DDI.getDI() << "\nBy stripping back to:\n  " << *V);
1282       return;
1283     }
1284   }
1285 
1286   // This was the final opportunity to salvage this debug information, and it
1287   // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1288   // any earlier variable location.
1289   auto Undef = UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
1290   auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1291   DAG.AddDbgValue(SDV, nullptr, false);
1292 
1293   LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  " << *DDI.getDI()
1294                     << "\n");
1295   LLVM_DEBUG(dbgs() << "  Last seen at:\n    " << *DDI.getDI()->getOperand(0)
1296                     << "\n");
1297 }
1298 
1299 bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
1300                                            DIExpression *Expr, DebugLoc dl,
1301                                            DebugLoc InstDL, unsigned Order) {
1302   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1303   SDDbgValue *SDV;
1304   if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1305       isa<ConstantPointerNull>(V)) {
1306     SDV = DAG.getConstantDbgValue(Var, Expr, V, dl, SDNodeOrder);
1307     DAG.AddDbgValue(SDV, nullptr, false);
1308     return true;
1309   }
1310 
1311   // If the Value is a frame index, we can create a FrameIndex debug value
1312   // without relying on the DAG at all.
1313   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1314     auto SI = FuncInfo.StaticAllocaMap.find(AI);
1315     if (SI != FuncInfo.StaticAllocaMap.end()) {
1316       auto SDV =
1317           DAG.getFrameIndexDbgValue(Var, Expr, SI->second,
1318                                     /*IsIndirect*/ false, dl, SDNodeOrder);
1319       // Do not attach the SDNodeDbgValue to an SDNode: this variable location
1320       // is still available even if the SDNode gets optimized out.
1321       DAG.AddDbgValue(SDV, nullptr, false);
1322       return true;
1323     }
1324   }
1325 
1326   // Do not use getValue() in here; we don't want to generate code at
1327   // this point if it hasn't been done yet.
1328   SDValue N = NodeMap[V];
1329   if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1330     N = UnusedArgNodeMap[V];
1331   if (N.getNode()) {
1332     if (EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
1333       return true;
1334     SDV = getDbgValue(N, Var, Expr, dl, SDNodeOrder);
1335     DAG.AddDbgValue(SDV, N.getNode(), false);
1336     return true;
1337   }
1338 
1339   // Special rules apply for the first dbg.values of parameter variables in a
1340   // function. Identify them by the fact that they reference Argument Values,
1341   // that their variable is a parameter, and that the parameter belongs to the
1342   // current function (not inlined). Let them dangle until they get an SDNode.
1343   bool IsParamOfFunc = isa<Argument>(V) && Var->isParameter() &&
1344                        !InstDL.getInlinedAt();
1345   if (!IsParamOfFunc) {
1346     // The value is not used in this block yet (or it would have an SDNode).
1347     // We still want the value to appear for the user if possible -- if it has
1348     // an associated VReg, we can refer to that instead.
1349     auto VMI = FuncInfo.ValueMap.find(V);
1350     if (VMI != FuncInfo.ValueMap.end()) {
1351       unsigned Reg = VMI->second;
1352       // If this is a PHI node, it may be split up into several MI PHI nodes
1353       // (in FunctionLoweringInfo::set).
1354       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1355                        V->getType(), None);
1356       if (RFV.occupiesMultipleRegs()) {
1357         unsigned Offset = 0;
1358         unsigned BitsToDescribe = 0;
1359         if (auto VarSize = Var->getSizeInBits())
1360           BitsToDescribe = *VarSize;
1361         if (auto Fragment = Expr->getFragmentInfo())
1362           BitsToDescribe = Fragment->SizeInBits;
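             // For example, a 64-bit variable split across two 32-bit
             // registers would typically be described by two fragments
             // covering bits [0, 32) and [32, 64), one DBG_VALUE per register.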
1363         for (auto RegAndSize : RFV.getRegsAndSizes()) {
1364           unsigned RegisterSize = RegAndSize.second;
1365           // Bail out if all bits are described already.
1366           if (Offset >= BitsToDescribe)
1367             break;
1368           unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1369               ? BitsToDescribe - Offset
1370               : RegisterSize;
1371           auto FragmentExpr = DIExpression::createFragmentExpression(
1372               Expr, Offset, FragmentSize);
1373           if (!FragmentExpr)
1374               continue;
1375           SDV = DAG.getVRegDbgValue(Var, *FragmentExpr, RegAndSize.first,
1376                                     false, dl, SDNodeOrder);
1377           DAG.AddDbgValue(SDV, nullptr, false);
1378           Offset += RegisterSize;
1379         }
1380       } else {
1381         SDV = DAG.getVRegDbgValue(Var, Expr, Reg, false, dl, SDNodeOrder);
1382         DAG.AddDbgValue(SDV, nullptr, false);
1383       }
1384       return true;
1385     }
1386   }
1387 
1388   return false;
1389 }
1390 
1391 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1392   // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1393   for (auto &Pair : DanglingDebugInfoMap)
1394     for (auto &DDI : Pair.second)
1395       salvageUnresolvedDbgValue(DDI);
1396   clearDanglingDebugInfo();
1397 }
1398 
1399 /// getCopyFromRegs - If a virtual register was allocated for the value V, emit
1400 /// a CopyFromReg of the specified type Ty. Otherwise return an empty SDValue().
1401 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1402   DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1403   SDValue Result;
1404 
1405   if (It != FuncInfo.ValueMap.end()) {
1406     Register InReg = It->second;
1407 
1408     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1409                      DAG.getDataLayout(), InReg, Ty,
1410                      None); // This is not an ABI copy.
1411     SDValue Chain = DAG.getEntryNode();
1412     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1413                                  V);
1414     resolveDanglingDebugInfo(V, Result);
1415   }
1416 
1417   return Result;
1418 }
1419 
1420 /// getValue - Return an SDValue for the given Value.
1421 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1422   // If we already have an SDValue for this value, use it. It's important
1423   // to do this first, so that we don't create a CopyFromReg if we already
1424   // have a regular SDValue.
1425   SDValue &N = NodeMap[V];
1426   if (N.getNode()) return N;
1427 
1428   // If there's a virtual register allocated and initialized for this
1429   // value, use it.
1430   if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1431     return copyFromReg;
1432 
1433   // Otherwise create a new SDValue and remember it.
1434   SDValue Val = getValueImpl(V);
1435   NodeMap[V] = Val;
1436   resolveDanglingDebugInfo(V, Val);
1437   return Val;
1438 }
1439 
1440 /// getNonRegisterValue - Return an SDValue for the given Value, but
1441 /// don't look in FuncInfo.ValueMap for a virtual register.
1442 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1443   // If we already have an SDValue for this value, use it.
1444   SDValue &N = NodeMap[V];
1445   if (N.getNode()) {
1446     if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
1447       // Remove the debug location from the node as the node is about to be used
1448       // in a location which may differ from the original debug location.  This
1449       // is relevant to Constant and ConstantFP nodes because they can appear
1450       // as constant expressions inside PHI nodes.
1451       N->setDebugLoc(DebugLoc());
1452     }
1453     return N;
1454   }
1455 
1456   // Otherwise create a new SDValue and remember it.
1457   SDValue Val = getValueImpl(V);
1458   NodeMap[V] = Val;
1459   resolveDanglingDebugInfo(V, Val);
1460   return Val;
1461 }
1462 
1463 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1464 /// Create an SDValue for the given value.
1465 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1466   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1467 
1468   if (const Constant *C = dyn_cast<Constant>(V)) {
1469     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1470 
1471     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1472       return DAG.getConstant(*CI, getCurSDLoc(), VT);
1473 
1474     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1475       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1476 
1477     if (isa<ConstantPointerNull>(C)) {
1478       unsigned AS = V->getType()->getPointerAddressSpace();
1479       return DAG.getConstant(0, getCurSDLoc(),
1480                              TLI.getPointerTy(DAG.getDataLayout(), AS));
1481     }
1482 
1483     if (match(C, m_VScale(DAG.getDataLayout())))
1484       return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1485 
1486     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1487       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1488 
1489     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1490       return DAG.getUNDEF(VT);
1491 
1492     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1493       visit(CE->getOpcode(), *CE);
1494       SDValue N1 = NodeMap[V];
1495       assert(N1.getNode() && "visit didn't populate the NodeMap!");
1496       return N1;
1497     }
1498 
1499     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1500       SmallVector<SDValue, 4> Constants;
1501       for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
1502            OI != OE; ++OI) {
1503         SDNode *Val = getValue(*OI).getNode();
1504         // If the operand is an empty aggregate, there are no values.
1505         if (!Val) continue;
1506         // Add each leaf value from the operand to the Constants list
1507         // to form a flattened list of all the values.
1508         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1509           Constants.push_back(SDValue(Val, i));
1510       }
1511 
1512       return DAG.getMergeValues(Constants, getCurSDLoc());
1513     }
1514 
1515     if (const ConstantDataSequential *CDS =
1516           dyn_cast<ConstantDataSequential>(C)) {
1517       SmallVector<SDValue, 4> Ops;
1518       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1519         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1520         // Add each leaf value from the operand to the Ops list
1521         // to form a flattened list of all the values.
1522         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1523           Ops.push_back(SDValue(Val, i));
1524       }
1525 
1526       if (isa<ArrayType>(CDS->getType()))
1527         return DAG.getMergeValues(Ops, getCurSDLoc());
1528       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1529     }
1530 
1531     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1532       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1533              "Unknown struct or array constant!");
1534 
1535       SmallVector<EVT, 4> ValueVTs;
1536       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1537       unsigned NumElts = ValueVTs.size();
1538       if (NumElts == 0)
1539         return SDValue(); // empty struct
1540       SmallVector<SDValue, 4> Constants(NumElts);
1541       for (unsigned i = 0; i != NumElts; ++i) {
1542         EVT EltVT = ValueVTs[i];
1543         if (isa<UndefValue>(C))
1544           Constants[i] = DAG.getUNDEF(EltVT);
1545         else if (EltVT.isFloatingPoint())
1546           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1547         else
1548           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1549       }
1550 
1551       return DAG.getMergeValues(Constants, getCurSDLoc());
1552     }
1553 
1554     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1555       return DAG.getBlockAddress(BA, VT);
1556 
1557     VectorType *VecTy = cast<VectorType>(V->getType());
1558 
1559     // Now that we know the number and type of the elements, get that number of
1560     // elements into the Ops array based on what kind of constant it is.
1561     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1562       SmallVector<SDValue, 16> Ops;
1563       unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1564       for (unsigned i = 0; i != NumElements; ++i)
1565         Ops.push_back(getValue(CV->getOperand(i)));
1566 
1567       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1568     } else if (isa<ConstantAggregateZero>(C)) {
1569       EVT EltVT =
1570           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1571 
1572       SDValue Op;
1573       if (EltVT.isFloatingPoint())
1574         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1575       else
1576         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1577 
1578       if (isa<ScalableVectorType>(VecTy))
1579         return NodeMap[V] = DAG.getSplatVector(VT, getCurSDLoc(), Op);
1580       else {
1581         SmallVector<SDValue, 16> Ops;
1582         Ops.assign(cast<FixedVectorType>(VecTy)->getNumElements(), Op);
1583         return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1584       }
1585     }
1586     llvm_unreachable("Unknown vector constant");
1587   }
1588 
1589   // If this is a static alloca, generate it as the frameindex instead of
1590   // computation.
1591   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1592     DenseMap<const AllocaInst*, int>::iterator SI =
1593       FuncInfo.StaticAllocaMap.find(AI);
1594     if (SI != FuncInfo.StaticAllocaMap.end())
1595       return DAG.getFrameIndex(SI->second,
1596                                TLI.getFrameIndexTy(DAG.getDataLayout()));
1597   }
1598 
1599   // If this is an instruction which fast-isel has deferred, select it now.
1600   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1601     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1602 
1603     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1604                      Inst->getType(), getABIRegCopyCC(V));
1605     SDValue Chain = DAG.getEntryNode();
1606     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1607   }
1608 
1609   if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V)) {
1610     return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1611   }
1612   llvm_unreachable("Can't get register for value!");
1613 }
1614 
1615 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1616   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1617   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1618   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1619   bool IsSEH = isAsynchronousEHPersonality(Pers);
1620   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1621   if (!IsSEH)
1622     CatchPadMBB->setIsEHScopeEntry();
1623   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1624   if (IsMSVCCXX || IsCoreCLR)
1625     CatchPadMBB->setIsEHFuncletEntry();
1626 }
1627 
1628 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1629   // Update machine-CFG edge.
1630   MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1631   FuncInfo.MBB->addSuccessor(TargetMBB);
1632 
1633   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1634   bool IsSEH = isAsynchronousEHPersonality(Pers);
1635   if (IsSEH) {
1636     // If this is not a fall-through branch or optimizations are switched off,
1637     // emit the branch.
1638     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1639         TM.getOptLevel() == CodeGenOpt::None)
1640       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1641                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1642     return;
1643   }
1644 
1645   // Figure out the funclet membership for the catchret's successor.
1646   // This will be used by the FuncletLayout pass to determine how to order the
1647   // BB's.
1648   // A 'catchret' returns to the outer scope's color.
1649   Value *ParentPad = I.getCatchSwitchParentPad();
1650   const BasicBlock *SuccessorColor;
1651   if (isa<ConstantTokenNone>(ParentPad))
1652     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1653   else
1654     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1655   assert(SuccessorColor && "No parent funclet for catchret!");
1656   MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1657   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1658 
1659   // Create the terminator node.
1660   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1661                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
1662                             DAG.getBasicBlock(SuccessorColorMBB));
1663   DAG.setRoot(Ret);
1664 }
1665 
1666 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1667   // Don't emit any special code for the cleanuppad instruction. It just marks
1668   // the start of an EH scope/funclet.
1669   FuncInfo.MBB->setIsEHScopeEntry();
1670   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1671   if (Pers != EHPersonality::Wasm_CXX) {
1672     FuncInfo.MBB->setIsEHFuncletEntry();
1673     FuncInfo.MBB->setIsCleanupFuncletEntry();
1674   }
1675 }
1676 
1677 // For wasm, there's always a single catch pad attached to a catchswitch, and
1678 // the control flow always stops at the single catch pad, as it does for a
1679 // cleanup pad. If the caught exception is not one of the types the catch pad
1680 // handles, it is rethrown by a rethrow instruction.
1681 static void findWasmUnwindDestinations(
1682     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1683     BranchProbability Prob,
1684     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1685         &UnwindDests) {
1686   while (EHPadBB) {
1687     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1688     if (isa<CleanupPadInst>(Pad)) {
1689       // Stop on cleanup pads.
1690       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1691       UnwindDests.back().first->setIsEHScopeEntry();
1692       break;
1693     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1694       // Add the catchpad handlers to the possible destinations. We don't
1695       // continue to the unwind destination of the catchswitch for wasm.
1696       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1697         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1698         UnwindDests.back().first->setIsEHScopeEntry();
1699       }
1700       break;
1701     } else {
1702       continue;
1703     }
1704   }
1705 }
1706 
1707 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
1708 /// many places it could ultimately go. In the IR, we have a single unwind
1709 /// destination, but in the machine CFG, we enumerate all the possible blocks.
1710 /// This function skips over imaginary basic blocks that hold catchswitch
1711 /// instructions, and finds all the "real" machine
1712 /// basic block destinations. As those destinations may not be successors of
1713 /// EHPadBB, here we also calculate the edge probability to those destinations.
1714 /// The passed-in Prob is the edge probability to EHPadBB.
1715 static void findUnwindDestinations(
1716     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1717     BranchProbability Prob,
1718     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1719         &UnwindDests) {
1720   EHPersonality Personality =
1721     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1722   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1723   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1724   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1725   bool IsSEH = isAsynchronousEHPersonality(Personality);
1726 
1727   if (IsWasmCXX) {
1728     findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
1729     assert(UnwindDests.size() <= 1 &&
1730            "There should be at most one unwind destination for wasm");
1731     return;
1732   }
1733 
1734   while (EHPadBB) {
1735     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1736     BasicBlock *NewEHPadBB = nullptr;
1737     if (isa<LandingPadInst>(Pad)) {
1738       // Stop on landingpads. They are not funclets.
1739       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1740       break;
1741     } else if (isa<CleanupPadInst>(Pad)) {
1742       // Stop on cleanup pads. Cleanups are always funclet entries for all known
1743       // personalities.
1744       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1745       UnwindDests.back().first->setIsEHScopeEntry();
1746       UnwindDests.back().first->setIsEHFuncletEntry();
1747       break;
1748     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1749       // Add the catchpad handlers to the possible destinations.
1750       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1751         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1752         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1753         if (IsMSVCCXX || IsCoreCLR)
1754           UnwindDests.back().first->setIsEHFuncletEntry();
1755         if (!IsSEH)
1756           UnwindDests.back().first->setIsEHScopeEntry();
1757       }
1758       NewEHPadBB = CatchSwitch->getUnwindDest();
1759     } else {
1760       continue;
1761     }
1762 
1763     BranchProbabilityInfo *BPI = FuncInfo.BPI;
1764     if (BPI && NewEHPadBB)
1765       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1766     EHPadBB = NewEHPadBB;
1767   }
1768 }
1769 
1770 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1771   // Update successor info.
1772   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1773   auto UnwindDest = I.getUnwindDest();
1774   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1775   BranchProbability UnwindDestProb =
1776       (BPI && UnwindDest)
1777           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1778           : BranchProbability::getZero();
1779   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1780   for (auto &UnwindDest : UnwindDests) {
1781     UnwindDest.first->setIsEHPad();
1782     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1783   }
1784   FuncInfo.MBB->normalizeSuccProbs();
1785 
1786   // Create the terminator node.
1787   SDValue Ret =
1788       DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1789   DAG.setRoot(Ret);
1790 }
1791 
1792 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1793   report_fatal_error("visitCatchSwitch not yet implemented!");
1794 }
1795 
1796 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1797   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1798   auto &DL = DAG.getDataLayout();
1799   SDValue Chain = getControlRoot();
1800   SmallVector<ISD::OutputArg, 8> Outs;
1801   SmallVector<SDValue, 8> OutVals;
1802 
1803   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1804   // lower
1805   //
1806   //   %val = call <ty> @llvm.experimental.deoptimize()
1807   //   ret <ty> %val
1808   //
1809   // differently.
1810   if (I.getParent()->getTerminatingDeoptimizeCall()) {
1811     LowerDeoptimizingReturn();
1812     return;
1813   }
1814 
1815   if (!FuncInfo.CanLowerReturn) {
1816     unsigned DemoteReg = FuncInfo.DemoteRegister;
1817     const Function *F = I.getParent()->getParent();
1818 
1819     // Emit a store of the return value through the virtual register.
1820     // Leave Outs empty so that LowerReturn won't try to load return
1821     // registers the usual way.
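         // For example, a function returning a struct too large for registers
         // is handled here: the hidden sret pointer was stashed in DemoteReg,
         // and the pieces of the return value are stored through it rather
         // than being added to Outs.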
1822     SmallVector<EVT, 1> PtrValueVTs;
1823     ComputeValueVTs(TLI, DL,
1824                     F->getReturnType()->getPointerTo(
1825                         DAG.getDataLayout().getAllocaAddrSpace()),
1826                     PtrValueVTs);
1827 
1828     SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1829                                         DemoteReg, PtrValueVTs[0]);
1830     SDValue RetOp = getValue(I.getOperand(0));
1831 
1832     SmallVector<EVT, 4> ValueVTs, MemVTs;
1833     SmallVector<uint64_t, 4> Offsets;
1834     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
1835                     &Offsets);
1836     unsigned NumValues = ValueVTs.size();
1837 
1838     SmallVector<SDValue, 4> Chains(NumValues);
1839     Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
1840     for (unsigned i = 0; i != NumValues; ++i) {
1841       // An aggregate return value cannot wrap around the address space, so
1842       // offsets to its parts don't wrap either.
1843       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);
1844 
1845       SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
1846       if (MemVTs[i] != ValueVTs[i])
1847         Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
1848       Chains[i] = DAG.getStore(
1849           Chain, getCurSDLoc(), Val,
1850           // FIXME: better loc info would be nice.
1851           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
1852           commonAlignment(BaseAlign, Offsets[i]));
1853     }
1854 
1855     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1856                         MVT::Other, Chains);
1857   } else if (I.getNumOperands() != 0) {
1858     SmallVector<EVT, 4> ValueVTs;
1859     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1860     unsigned NumValues = ValueVTs.size();
1861     if (NumValues) {
1862       SDValue RetOp = getValue(I.getOperand(0));
1863 
1864       const Function *F = I.getParent()->getParent();
1865 
1866       bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
1867           I.getOperand(0)->getType(), F->getCallingConv(),
1868           /*IsVarArg*/ false);
1869 
1870       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1871       if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1872                                           Attribute::SExt))
1873         ExtendKind = ISD::SIGN_EXTEND;
1874       else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1875                                                Attribute::ZExt))
1876         ExtendKind = ISD::ZERO_EXTEND;
1877 
1878       LLVMContext &Context = F->getContext();
1879       bool RetInReg = F->getAttributes().hasAttribute(
1880           AttributeList::ReturnIndex, Attribute::InReg);
1881 
1882       for (unsigned j = 0; j != NumValues; ++j) {
1883         EVT VT = ValueVTs[j];
1884 
1885         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1886           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1887 
1888         CallingConv::ID CC = F->getCallingConv();
1889 
1890         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
1891         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
1892         SmallVector<SDValue, 4> Parts(NumParts);
1893         getCopyToParts(DAG, getCurSDLoc(),
1894                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1895                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
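             // For example, returning an i128 value on a typical 64-bit
             // target yields NumParts == 2 with PartVT == i64, so two
             // OutputArgs are pushed for this value below.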
1896 
1897         // 'inreg' on function refers to return value
1898         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1899         if (RetInReg)
1900           Flags.setInReg();
1901 
1902         if (I.getOperand(0)->getType()->isPointerTy()) {
1903           Flags.setPointer();
1904           Flags.setPointerAddrSpace(
1905               cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
1906         }
1907 
1908         if (NeedsRegBlock) {
1909           Flags.setInConsecutiveRegs();
1910           if (j == NumValues - 1)
1911             Flags.setInConsecutiveRegsLast();
1912         }
1913 
1914         // Propagate extension type if any
1915         if (ExtendKind == ISD::SIGN_EXTEND)
1916           Flags.setSExt();
1917         else if (ExtendKind == ISD::ZERO_EXTEND)
1918           Flags.setZExt();
1919 
1920         for (unsigned i = 0; i < NumParts; ++i) {
1921           Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1922                                         VT, /*isfixed=*/true, 0, 0));
1923           OutVals.push_back(Parts[i]);
1924         }
1925       }
1926     }
1927   }
1928 
1929   // Push the swifterror virtual register in as the last element of Outs. This
1930   // makes sure the swifterror virtual register will be returned in the
1931   // swifterror physical register.
1932   const Function *F = I.getParent()->getParent();
1933   if (TLI.supportSwiftError() &&
1934       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1935     assert(SwiftError.getFunctionArg() && "Need a swift error argument");
1936     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1937     Flags.setSwiftError();
1938     Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1939                                   EVT(TLI.getPointerTy(DL)) /*argvt*/,
1940                                   true /*isfixed*/, 1 /*origidx*/,
1941                                   0 /*partOffs*/));
1942     // Create SDNode for the swifterror virtual register.
1943     OutVals.push_back(
1944         DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
1945                             &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
1946                         EVT(TLI.getPointerTy(DL))));
1947   }
1948 
1949   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
1950   CallingConv::ID CallConv =
1951     DAG.getMachineFunction().getFunction().getCallingConv();
1952   Chain = DAG.getTargetLoweringInfo().LowerReturn(
1953       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1954 
1955   // Verify that the target's LowerReturn behaved as expected.
1956   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1957          "LowerReturn didn't return a valid chain!");
1958 
1959   // Update the DAG with the new chain value resulting from return lowering.
1960   DAG.setRoot(Chain);
1961 }
1962 
1963 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1964 /// created for it, emit nodes to copy the value into the virtual
1965 /// registers.
1966 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1967   // Skip empty types
1968   if (V->getType()->isEmptyTy())
1969     return;
1970 
1971   DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
1972   if (VMI != FuncInfo.ValueMap.end()) {
1973     assert(!V->use_empty() && "Unused value assigned virtual registers!");
1974     CopyValueToVirtualRegister(V, VMI->second);
1975   }
1976 }
1977 
1978 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1979 /// the current basic block, add it to ValueMap now so that we'll get a
1980 /// CopyTo/FromReg.
1981 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1982   // No need to export constants.
1983   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1984 
1985   // Already exported?
1986   if (FuncInfo.isExportedInst(V)) return;
1987 
1988   unsigned Reg = FuncInfo.InitializeRegForValue(V);
1989   CopyValueToVirtualRegister(V, Reg);
1990 }
1991 
1992 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1993                                                      const BasicBlock *FromBB) {
1994   // The operands of the setcc have to be in this block.  We don't know
1995   // how to export them from some other block.
1996   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1997     // Can export from current BB.
1998     if (VI->getParent() == FromBB)
1999       return true;
2000 
2001     // Is already exported, noop.
2002     return FuncInfo.isExportedInst(V);
2003   }
2004 
2005   // If this is an argument, we can export it if the BB is the entry block or
2006   // if it is already exported.
2007   if (isa<Argument>(V)) {
2008     if (FromBB == &FromBB->getParent()->getEntryBlock())
2009       return true;
2010 
2011     // Otherwise, can only export this if it is already exported.
2012     return FuncInfo.isExportedInst(V);
2013   }
2014 
2015   // Otherwise, constants can always be exported.
2016   return true;
2017 }
2018 
2019 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2020 BranchProbability
2021 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2022                                         const MachineBasicBlock *Dst) const {
2023   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2024   const BasicBlock *SrcBB = Src->getBasicBlock();
2025   const BasicBlock *DstBB = Dst->getBasicBlock();
2026   if (!BPI) {
2027     // If BPI is not available, set the default probability as 1 / N, where N is
2028     // the number of successors.
2029     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2030     return BranchProbability(1, SuccSize);
2031   }
2032   return BPI->getEdgeProbability(SrcBB, DstBB);
2033 }
2034 
2035 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2036                                                MachineBasicBlock *Dst,
2037                                                BranchProbability Prob) {
2038   if (!FuncInfo.BPI)
2039     Src->addSuccessorWithoutProb(Dst);
2040   else {
2041     if (Prob.isUnknown())
2042       Prob = getEdgeProbability(Src, Dst);
2043     Src->addSuccessor(Dst, Prob);
2044   }
2045 }
2046 
2047 static bool InBlock(const Value *V, const BasicBlock *BB) {
2048   if (const Instruction *I = dyn_cast<Instruction>(V))
2049     return I->getParent() == BB;
2050   return true;
2051 }
2052 
2053 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2054 /// This function emits a branch and is used at the leaves of an OR or an
2055 /// AND operator tree.
2056 void
2057 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2058                                                   MachineBasicBlock *TBB,
2059                                                   MachineBasicBlock *FBB,
2060                                                   MachineBasicBlock *CurBB,
2061                                                   MachineBasicBlock *SwitchBB,
2062                                                   BranchProbability TProb,
2063                                                   BranchProbability FProb,
2064                                                   bool InvertCond) {
2065   const BasicBlock *BB = CurBB->getBasicBlock();
2066 
2067   // If the leaf of the tree is a comparison, merge the condition into
2068   // the caseblock.
2069   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2070     // The operands of the cmp have to be in this block.  We don't know
2071     // how to export them from some other block.  If this is the first block
2072     // of the sequence, no exporting is needed.
2073     if (CurBB == SwitchBB ||
2074         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2075          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2076       ISD::CondCode Condition;
2077       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2078         ICmpInst::Predicate Pred =
2079             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2080         Condition = getICmpCondCode(Pred);
2081       } else {
2082         const FCmpInst *FC = cast<FCmpInst>(Cond);
2083         FCmpInst::Predicate Pred =
2084             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2085         Condition = getFCmpCondCode(Pred);
2086         if (TM.Options.NoNaNsFPMath)
2087           Condition = getFCmpCodeWithoutNaN(Condition);
2088       }
2089 
2090       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2091                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2092       SL->SwitchCases.push_back(CB);
2093       return;
2094     }
2095   }
2096 
2097   // Create a CaseBlock record representing this branch.
2098   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2099   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2100                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2101   SL->SwitchCases.push_back(CB);
2102 }
2103 
2104 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2105                                                MachineBasicBlock *TBB,
2106                                                MachineBasicBlock *FBB,
2107                                                MachineBasicBlock *CurBB,
2108                                                MachineBasicBlock *SwitchBB,
2109                                                Instruction::BinaryOps Opc,
2110                                                BranchProbability TProb,
2111                                                BranchProbability FProb,
2112                                                bool InvertCond) {
2113   // Skip over a 'not' that is not part of the tree, and remember to invert the
2114   // op and operands at the next level.
2115   Value *NotCond;
2116   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2117       InBlock(NotCond, CurBB->getBasicBlock())) {
2118     FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2119                          !InvertCond);
2120     return;
2121   }
2122 
2123   const Instruction *BOp = dyn_cast<Instruction>(Cond);
2124   // Compute the effective opcode for Cond, taking into account whether it needs
2125   // to be inverted, e.g.
2126   //   and (not (or A, B)), C
2127   // gets lowered as
2128   //   and (and (not A, not B), C)
2129   unsigned BOpc = 0;
2130   if (BOp) {
2131     BOpc = BOp->getOpcode();
2132     if (InvertCond) {
2133       if (BOpc == Instruction::And)
2134         BOpc = Instruction::Or;
2135       else if (BOpc == Instruction::Or)
2136         BOpc = Instruction::And;
2137     }
2138   }
2139 
2140   // If this node is not part of the or/and tree, emit it as a branch.
2141   if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
2142       BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
2143       BOp->getParent() != CurBB->getBasicBlock() ||
2144       !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
2145       !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
2146     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2147                                  TProb, FProb, InvertCond);
2148     return;
2149   }
2150 
2151   //  Create TmpBB after CurBB.
2152   MachineFunction::iterator BBI(CurBB);
2153   MachineFunction &MF = DAG.getMachineFunction();
2154   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2155   CurBB->getParent()->insert(++BBI, TmpBB);
2156 
2157   if (Opc == Instruction::Or) {
2158     // Codegen X | Y as:
2159     // BB1:
2160     //   jmp_if_X TBB
2161     //   jmp TmpBB
2162     // TmpBB:
2163     //   jmp_if_Y TBB
2164     //   jmp FBB
2165     //
2166 
2167     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2168     // The requirement is that
2169     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2170     //     = TrueProb for original BB.
2171     // Assuming the original probabilities are A and B, one choice is to set
2172     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2173     // A/(1+B) and 2B/(1+B). This choice assumes that
2174     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2175     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2176     // TmpBB, but the math is more complicated.
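         // As a concrete illustration: with A = B = 1/2, BB1 gets
         // probabilities (1/4, 3/4) and TmpBB, after normalizing {1/4, 1/2},
         // gets (1/3, 2/3); TBB is then reached with probability
         // 1/4 + 3/4 * 1/3 = 1/2 = A, as required.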
2177 
2178     auto NewTrueProb = TProb / 2;
2179     auto NewFalseProb = TProb / 2 + FProb;
2180     // Emit the LHS condition.
2181     FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
2182                          NewTrueProb, NewFalseProb, InvertCond);
2183 
2184     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2185     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2186     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2187     // Emit the RHS condition into TmpBB.
2188     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
2189                          Probs[0], Probs[1], InvertCond);
2190   } else {
2191     assert(Opc == Instruction::And && "Unknown merge op!");
2192     // Codegen X & Y as:
2193     // BB1:
2194     //   jmp_if_X TmpBB
2195     //   jmp FBB
2196     // TmpBB:
2197     //   jmp_if_Y TBB
2198     //   jmp FBB
2199     //
2200     //  This requires creation of TmpBB after CurBB.
2201 
2202     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2203     // The requirement is that
2204     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2205     //     = FalseProb for original BB.
2206     // Assuming the original probabilities are A and B, one choice is to set
2207     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2208     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2209     // TrueProb for BB1 * FalseProb for TmpBB.
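         // As a concrete illustration: with A = B = 1/2, BB1 gets
         // probabilities (3/4, 1/4) and TmpBB, after normalizing {1/2, 1/4},
         // gets (2/3, 1/3); FBB is then reached with probability
         // 1/4 + 3/4 * 1/3 = 1/2 = B, as required.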
2210 
2211     auto NewTrueProb = TProb + FProb / 2;
2212     auto NewFalseProb = FProb / 2;
2213     // Emit the LHS condition.
2214     FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
2215                          NewTrueProb, NewFalseProb, InvertCond);
2216 
2217     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2218     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2219     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2220     // Emit the RHS condition into TmpBB.
2221     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
2222                          Probs[0], Probs[1], InvertCond);
2223   }
2224 }
2225 
2226 /// If the set of cases should be emitted as a series of branches, return true.
2227 /// If we should emit this as a bunch of and/or'd together conditions, return
2228 /// false.
2229 bool
2230 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2231   if (Cases.size() != 2) return true;
2232 
2233   // If this is two comparisons of the same values or'd or and'd together, they
2234   // will get folded into a single comparison, so don't emit two blocks.
2235   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2236        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2237       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2238        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2239     return false;
2240   }
2241 
2242   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2243   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2244   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2245       Cases[0].CC == Cases[1].CC &&
2246       isa<Constant>(Cases[0].CmpRHS) &&
2247       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2248     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2249       return false;
2250     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2251       return false;
2252   }
2253 
2254   return true;
2255 }
2256 
2257 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2258   MachineBasicBlock *BrMBB = FuncInfo.MBB;
2259 
2260   // Update machine-CFG edges.
2261   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2262 
2263   if (I.isUnconditional()) {
2264     // Update machine-CFG edges.
2265     BrMBB->addSuccessor(Succ0MBB);
2266 
2267     // If this is not a fall-through branch or optimizations are switched off,
2268     // emit the branch.
2269     if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
2270       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2271                               MVT::Other, getControlRoot(),
2272                               DAG.getBasicBlock(Succ0MBB)));
2273 
2274     return;
2275   }
2276 
2277   // If this condition is one of the special cases we handle, do special stuff
2278   // now.
2279   const Value *CondVal = I.getCondition();
2280   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2281 
2282   // If this is a series of conditions that are or'd or and'd together, emit
2283   // this as a sequence of branches instead of setcc's with and/or operations.
2284   // As long as jumps are not expensive, this should improve performance.
2285   // For example, instead of something like:
2286   //     cmp A, B
2287   //     C = seteq
2288   //     cmp D, E
2289   //     F = setle
2290   //     or C, F
2291   //     jnz foo
2292   // Emit:
2293   //     cmp A, B
2294   //     je foo
2295   //     cmp D, E
2296   //     jle foo
2297   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
2298     Instruction::BinaryOps Opcode = BOp->getOpcode();
2299     if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
2300         !I.hasMetadata(LLVMContext::MD_unpredictable) &&
2301         (Opcode == Instruction::And || Opcode == Instruction::Or)) {
2302       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
2303                            Opcode,
2304                            getEdgeProbability(BrMBB, Succ0MBB),
2305                            getEdgeProbability(BrMBB, Succ1MBB),
2306                            /*InvertCond=*/false);
2307       // If the compares in later blocks need to use values not currently
2308       // exported from this block, export them now.  This block should always
2309       // be the first entry.
2310       assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2311 
2312       // Allow some cases to be rejected.
2313       if (ShouldEmitAsBranches(SL->SwitchCases)) {
2314         for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2315           ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2316           ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2317         }
2318 
2319         // Emit the branch for this block.
2320         visitSwitchCase(SL->SwitchCases[0], BrMBB);
2321         SL->SwitchCases.erase(SL->SwitchCases.begin());
2322         return;
2323       }
2324 
2325       // Okay, we decided not to do this, remove any inserted MBB's and clear
2326       // SwitchCases.
2327       for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2328         FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2329 
2330       SL->SwitchCases.clear();
2331     }
2332   }
2333 
2334   // Create a CaseBlock record representing this branch.
2335   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2336                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2337 
2338   // Use visitSwitchCase to actually insert the fast branch sequence for this
2339   // cond branch.
2340   visitSwitchCase(CB, BrMBB);
2341 }
2342 
2343 /// visitSwitchCase - Emits the necessary code to represent a single node in
2344 /// the binary search tree resulting from lowering a switch instruction.
2345 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2346                                           MachineBasicBlock *SwitchBB) {
2347   SDValue Cond;
2348   SDValue CondLHS = getValue(CB.CmpLHS);
2349   SDLoc dl = CB.DL;
2350 
2351   if (CB.CC == ISD::SETTRUE) {
2352     // Branch or fall through to TrueBB.
2353     addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2354     SwitchBB->normalizeSuccProbs();
2355     if (CB.TrueBB != NextBlock(SwitchBB)) {
2356       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2357                               DAG.getBasicBlock(CB.TrueBB)));
2358     }
2359     return;
2360   }
2361 
2362   auto &TLI = DAG.getTargetLoweringInfo();
2363   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2364 
2365   // Build the setcc now.
2366   if (!CB.CmpMHS) {
2367     // Fold "(X == true)" to X and "(X == false)" to !X to
2368     // handle common cases produced by branch lowering.
2369     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2370         CB.CC == ISD::SETEQ)
2371       Cond = CondLHS;
2372     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2373              CB.CC == ISD::SETEQ) {
2374       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2375       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2376     } else {
2377       SDValue CondRHS = getValue(CB.CmpRHS);
2378 
2379       // If a pointer's DAG type is larger than its memory type then the DAG
2380       // values are zero-extended. This breaks signed comparisons so truncate
2381       // back to the underlying type before doing the compare.
2382       if (CondLHS.getValueType() != MemVT) {
2383         CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2384         CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2385       }
2386       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2387     }
2388   } else {
2389     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2390 
2391     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2392     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2393 
2394     SDValue CmpOp = getValue(CB.CmpMHS);
2395     EVT VT = CmpOp.getValueType();
2396 
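         // For example, a case range [10, 13] is emitted below roughly as
         // "(CmpOp - 10) <=u 3"; values below 10 wrap around in the unsigned
         // subtraction, so a single unsigned comparison covers both bounds.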
2397     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2398       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2399                           ISD::SETLE);
2400     } else {
2401       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2402                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2403       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2404                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2405     }
2406   }
2407 
2408   // Update successor info
2409   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2410   // TrueBB and FalseBB are always different unless the incoming IR is
2411   // degenerate. This only happens when running llc on weird IR.
2412   if (CB.TrueBB != CB.FalseBB)
2413     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2414   SwitchBB->normalizeSuccProbs();
2415 
2416   // If the lhs block is the next block, invert the condition so that we can
2417   // fall through to the lhs instead of the rhs block.
2418   if (CB.TrueBB == NextBlock(SwitchBB)) {
2419     std::swap(CB.TrueBB, CB.FalseBB);
2420     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2421     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2422   }
2423 
2424   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2425                                MVT::Other, getControlRoot(), Cond,
2426                                DAG.getBasicBlock(CB.TrueBB));
2427 
2428   // Insert the false branch. Do this even if it's a fall through branch,
2429   // this makes it easier to do DAG optimizations which require inverting
2430   // the branch condition.
2431   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2432                        DAG.getBasicBlock(CB.FalseBB));
2433 
2434   DAG.setRoot(BrCond);
2435 }
2436 
2437 /// visitJumpTable - Emit JumpTable node in the current MBB
2438 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2439   // Emit the code for the jump table
2440   assert(JT.Reg != -1U && "Should lower JT Header first!");
2441   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2442   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2443                                      JT.Reg, PTy);
2444   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2445   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2446                                     MVT::Other, Index.getValue(1),
2447                                     Table, Index);
2448   DAG.setRoot(BrJumpTable);
2449 }
2450 
2451 /// visitJumpTableHeader - This function emits necessary code to produce index
2452 /// in the JumpTable from switch case.
2453 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2454                                                JumpTableHeader &JTH,
2455                                                MachineBasicBlock *SwitchBB) {
2456   SDLoc dl = getCurSDLoc();
2457 
2458   // Subtract the lowest switch case value from the value being switched on.
2459   SDValue SwitchOp = getValue(JTH.SValue);
2460   EVT VT = SwitchOp.getValueType();
2461   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2462                             DAG.getConstant(JTH.First, dl, VT));
2463 
2464   // The SDNode we just created, which holds the value being switched on minus
2465   // the smallest case value, needs to be copied to a virtual register so it
2466   // can be used as an index into the jump table in a subsequent basic block.
2467   // This value may be smaller or larger than the target's pointer type, and
2468   // therefore may require extension or truncation.
2469   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2470   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2471 
2472   unsigned JumpTableReg =
2473       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2474   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2475                                     JumpTableReg, SwitchOp);
2476   JT.Reg = JumpTableReg;
2477 
2478   if (!JTH.OmitRangeCheck) {
2479     // Emit the range check for the jump table, and branch to the default block
2480     // for the switch statement if the value being switched on exceeds the
2481     // largest case in the switch.
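         // For example, with cases 10..13 the check is "Sub >u 3" (where Sub
         // is SwitchOp - 10); values below 10 wrap around in the unsigned
         // subtraction and therefore also branch to the default block.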
2482     SDValue CMP = DAG.getSetCC(
2483         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2484                                    Sub.getValueType()),
2485         Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2486 
2487     SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2488                                  MVT::Other, CopyTo, CMP,
2489                                  DAG.getBasicBlock(JT.Default));
2490 
2491     // Avoid emitting unnecessary branches to the next block.
2492     if (JT.MBB != NextBlock(SwitchBB))
2493       BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2494                            DAG.getBasicBlock(JT.MBB));
2495 
2496     DAG.setRoot(BrCond);
2497   } else {
2498     // Avoid emitting unnecessary branches to the next block.
2499     if (JT.MBB != NextBlock(SwitchBB))
2500       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2501                               DAG.getBasicBlock(JT.MBB)));
2502     else
2503       DAG.setRoot(CopyTo);
2504   }
2505 }
2506 
2507 /// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
2508 /// variable if there exists one.
2509 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2510                                  SDValue &Chain) {
2511   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2512   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2513   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2514   MachineFunction &MF = DAG.getMachineFunction();
2515   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2516   MachineSDNode *Node =
2517       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2518   if (Global) {
2519     MachinePointerInfo MPInfo(Global);
2520     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2521                  MachineMemOperand::MODereferenceable;
2522     MachineMemOperand *MemRef = MF.getMachineMemOperand(
2523         MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
2524     DAG.setNodeMemRefs(Node, {MemRef});
2525   }
2526   if (PtrTy != PtrMemTy)
2527     return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2528   return SDValue(Node, 0);
2529 }
2530 
2531 /// Codegen a new tail for a stack protector check ParentMBB which has had its
2532 /// tail spliced into a stack protector check success bb.
2533 ///
2534 /// For a high level explanation of how this fits into the stack protector
2535 /// generation see the comment on the declaration of class
2536 /// StackProtectorDescriptor.
2537 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2538                                                   MachineBasicBlock *ParentBB) {
2539 
2540   // First create the loads to the guard/stack slot for the comparison.
2541   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2542   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2543   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2544 
2545   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2546   int FI = MFI.getStackProtectorIndex();
2547 
2548   SDValue Guard;
2549   SDLoc dl = getCurSDLoc();
2550   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2551   const Module &M = *ParentBB->getParent()->getFunction().getParent();
2552   unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
2553 
2554   // Generate code to load the content of the guard slot.
2555   SDValue GuardVal = DAG.getLoad(
2556       PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2557       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2558       MachineMemOperand::MOVolatile);
2559 
2560   if (TLI.useStackGuardXorFP())
2561     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2562 
2563   // Retrieve the guard check function; it is nullptr if the instrumentation is inlined.
2564   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2565     // The target provides a guard check function to validate the guard value.
2566     // Generate a call to that function with the content of the guard slot as
2567     // argument.
2568     FunctionType *FnTy = GuardCheckFn->getFunctionType();
2569     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2570 
2571     TargetLowering::ArgListTy Args;
2572     TargetLowering::ArgListEntry Entry;
2573     Entry.Node = GuardVal;
2574     Entry.Ty = FnTy->getParamType(0);
2575     if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
2576       Entry.IsInReg = true;
2577     Args.push_back(Entry);
2578 
2579     TargetLowering::CallLoweringInfo CLI(DAG);
2580     CLI.setDebugLoc(getCurSDLoc())
2581         .setChain(DAG.getEntryNode())
2582         .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2583                    getValue(GuardCheckFn), std::move(Args));
2584 
2585     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2586     DAG.setRoot(Result.second);
2587     return;
2588   }
2589 
2590   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2591   // Otherwise, emit a volatile load to retrieve the stack guard value.
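       // Illustrative sketch (an assumption, not taken verbatim from the code):
       // the two paths below conceptually produce
       //   %guard = LOAD_STACK_GUARD, or a volatile load of the guard variable
       //   setne %guard, %slot          ; %slot is the GuardVal loaded above
       //   brcond -> FailureMBB, br -> SuccessMBB
       // with the failure block ultimately calling __stack_chk_fail().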
2592   SDValue Chain = DAG.getEntryNode();
2593   if (TLI.useLoadStackGuardNode()) {
2594     Guard = getLoadStackGuard(DAG, dl, Chain);
2595   } else {
2596     const Value *IRGuard = TLI.getSDagStackGuard(M);
2597     SDValue GuardPtr = getValue(IRGuard);
2598 
2599     Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2600                         MachinePointerInfo(IRGuard, 0), Align,
2601                         MachineMemOperand::MOVolatile);
2602   }
2603 
2604   // Perform the comparison via a SETCC node.
2605   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2606                                                         *DAG.getContext(),
2607                                                         Guard.getValueType()),
2608                              Guard, GuardVal, ISD::SETNE);
2609 
2610   // If the guard and stack slot contents are not equal, branch to the failure MBB.
2611   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2612                                MVT::Other, GuardVal.getOperand(0),
2613                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2614   // Otherwise branch to success MBB.
2615   SDValue Br = DAG.getNode(ISD::BR, dl,
2616                            MVT::Other, BrCond,
2617                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2618 
2619   DAG.setRoot(Br);
2620 }
2621 
2622 /// Codegen the failure basic block for a stack protector check.
2623 ///
2624 /// A failure stack protector machine basic block consists simply of a call to
2625 /// __stack_chk_fail().
2626 ///
2627 /// For a high level explanation of how this fits into the stack protector
2628 /// generation see the comment on the declaration of class
2629 /// StackProtectorDescriptor.
2630 void
2631 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2632   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2633   TargetLowering::MakeLibCallOptions CallOptions;
2634   CallOptions.setDiscardResult(true);
2635   SDValue Chain =
2636       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2637                       None, CallOptions, getCurSDLoc()).second;
2638   // On PS4, the "return address" must still be within the calling function,
2639   // even if it's at the very end, so emit an explicit TRAP here.
2640   // Passing 'true' for doesNotReturn above won't generate the trap for us.
2641   if (TM.getTargetTriple().isPS4CPU())
2642     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2643 
2644   DAG.setRoot(Chain);
2645 }
2646 
2647 /// visitBitTestHeader - This function emits the necessary code to produce a
2648 /// value suitable for "bit tests".
2649 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2650                                              MachineBasicBlock *SwitchBB) {
2651   SDLoc dl = getCurSDLoc();
2652 
2653   // Subtract the minimum value.
2654   SDValue SwitchOp = getValue(B.SValue);
2655   EVT VT = SwitchOp.getValueType();
2656   SDValue RangeSub =
2657       DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
2658 
2659   // Determine the type of the test operands.
2660   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2661   bool UsePtrType = false;
2662   if (!TLI.isTypeLegal(VT)) {
2663     UsePtrType = true;
2664   } else {
2665     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2666       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2667         // Switch table case ranges are encoded into a series of masks.
2668         // Just use pointer type, it's guaranteed to fit.
2669         UsePtrType = true;
2670         break;
2671       }
2672   }
2673   SDValue Sub = RangeSub;
2674   if (UsePtrType) {
2675     VT = TLI.getPointerTy(DAG.getDataLayout());
2676     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2677   }
2678 
2679   B.RegVT = VT.getSimpleVT();
2680   B.Reg = FuncInfo.CreateReg(B.RegVT);
2681   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2682 
2683   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2684 
2685   if (!B.OmitRangeCheck)
2686     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2687   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2688   SwitchBB->normalizeSuccProbs();
2689 
2690   SDValue Root = CopyTo;
2691   if (!B.OmitRangeCheck) {
2692     // Conditional branch to the default block.
2693     SDValue RangeCmp = DAG.getSetCC(dl,
2694         TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2695                                RangeSub.getValueType()),
2696         RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
2697         ISD::SETUGT);
2698 
2699     Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
2700                        DAG.getBasicBlock(B.Default));
2701   }
2702 
2703   // Avoid emitting unnecessary branches to the next block.
2704   if (MBB != NextBlock(SwitchBB))
2705     Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
2706 
2707   DAG.setRoot(Root);
2708 }
2709 
2710 /// visitBitTestCase - This function produces one "bit test".
2711 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2712                                            MachineBasicBlock* NextMBB,
2713                                            BranchProbability BranchProbToNext,
2714                                            unsigned Reg,
2715                                            BitTestCase &B,
2716                                            MachineBasicBlock *SwitchBB) {
2717   SDLoc dl = getCurSDLoc();
2718   MVT VT = BB.RegVT;
2719   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2720   SDValue Cmp;
2721   unsigned PopCount = countPopulation(B.Mask);
2722   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2723   if (PopCount == 1) {
2724     // Testing for a single bit; just compare the shift count with what it
2725     // would need to be to shift a 1 bit in that position.
2726     Cmp = DAG.getSetCC(
2727         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2728         ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2729         ISD::SETEQ);
2730   } else if (PopCount == BB.Range) {
2731     // There is only one zero bit in the range, test for it directly.
2732     Cmp = DAG.getSetCC(
2733         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2734         ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2735         ISD::SETNE);
2736   } else {
2737     // Make desired shift
2738     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2739                                     DAG.getConstant(1, dl, VT), ShiftOp);
2740 
2741     // Emit bit tests and jumps
2742     SDValue AndOp = DAG.getNode(ISD::AND, dl,
2743                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2744     Cmp = DAG.getSetCC(
2745         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2746         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2747   }
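       // Worked example (illustrative only): for B.Mask == 0b00100100 with a
       // bit-test range wider than two, neither special case applies, so the
       // general path above emits ((1 << ShiftOp) & 0b00100100) != 0, i.e. the
       // test succeeds exactly when the rebased switch value selects bit 2 or
       // bit 5 of the mask.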
2748 
2749   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2750   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2751   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2752   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2753   // The sum of B.ExtraProb and BranchProbToNext is not guaranteed to be one,
2754   // as they are relative probabilities (and thus work more like weights), so
2755   // we need to normalize them so that their sum becomes one.
2756   SwitchBB->normalizeSuccProbs();
2757 
2758   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2759                               MVT::Other, getControlRoot(),
2760                               Cmp, DAG.getBasicBlock(B.TargetBB));
2761 
2762   // Avoid emitting unnecessary branches to the next block.
2763   if (NextMBB != NextBlock(SwitchBB))
2764     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2765                         DAG.getBasicBlock(NextMBB));
2766 
2767   DAG.setRoot(BrAnd);
2768 }
2769 
2770 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2771   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2772 
2773   // Retrieve successors. Look through artificial IR level blocks like
2774   // catchswitch for successors.
2775   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2776   const BasicBlock *EHPadBB = I.getSuccessor(1);
2777 
2778   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2779   // have to do anything here to lower funclet bundles.
2780   assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt,
2781                                         LLVMContext::OB_gc_transition,
2782                                         LLVMContext::OB_gc_live,
2783                                         LLVMContext::OB_funclet,
2784                                         LLVMContext::OB_cfguardtarget}) &&
2785          "Cannot lower invokes with arbitrary operand bundles yet!");
2786 
2787   const Value *Callee(I.getCalledOperand());
2788   const Function *Fn = dyn_cast<Function>(Callee);
2789   if (isa<InlineAsm>(Callee))
2790     visitInlineAsm(I);
2791   else if (Fn && Fn->isIntrinsic()) {
2792     switch (Fn->getIntrinsicID()) {
2793     default:
2794       llvm_unreachable("Cannot invoke this intrinsic");
2795     case Intrinsic::donothing:
2796       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2797       break;
2798     case Intrinsic::experimental_patchpoint_void:
2799     case Intrinsic::experimental_patchpoint_i64:
2800       visitPatchpoint(I, EHPadBB);
2801       break;
2802     case Intrinsic::experimental_gc_statepoint:
2803       LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
2804       break;
2805     case Intrinsic::wasm_rethrow_in_catch: {
2806       // This is usually done in visitTargetIntrinsic, but this intrinsic is
2807       // special because it can be invoked, so we manually lower it to a DAG
2808       // node here.
2809       SmallVector<SDValue, 8> Ops;
2810       Ops.push_back(getRoot()); // inchain
2811       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2812       Ops.push_back(
2813           DAG.getTargetConstant(Intrinsic::wasm_rethrow_in_catch, getCurSDLoc(),
2814                                 TLI.getPointerTy(DAG.getDataLayout())));
2815       SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
2816       DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
2817       break;
2818     }
2819     }
2820   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2821     // Currently we do not lower any intrinsic calls with deopt operand bundles.
2822     // Eventually we will support lowering the @llvm.experimental.deoptimize
2823     // intrinsic, and right now there are no plans to support other intrinsics
2824     // with deopt state.
2825     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2826   } else {
2827     LowerCallTo(I, getValue(Callee), false, EHPadBB);
2828   }
2829 
2830   // If the value of the invoke is used outside of its defining block, make it
2831   // available as a virtual register.
2832   // We already took care of the exported value for the statepoint instruction
2833   // during the call to LowerStatepoint.
2834   if (!isa<GCStatepointInst>(I)) {
2835     CopyToExportRegsIfNeeded(&I);
2836   }
2837 
2838   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2839   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2840   BranchProbability EHPadBBProb =
2841       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2842           : BranchProbability::getZero();
2843   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2844 
2845   // Update successor info.
2846   addSuccessorWithProb(InvokeMBB, Return);
2847   for (auto &UnwindDest : UnwindDests) {
2848     UnwindDest.first->setIsEHPad();
2849     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2850   }
2851   InvokeMBB->normalizeSuccProbs();
2852 
2853   // Drop into normal successor.
2854   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
2855                           DAG.getBasicBlock(Return)));
2856 }
2857 
2858 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
2859   MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
2860 
2861   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2862   // have to do anything here to lower funclet bundles.
2863   assert(!I.hasOperandBundlesOtherThan(
2864              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2865          "Cannot lower callbrs with arbitrary operand bundles yet!");
2866 
2867   assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
2868   visitInlineAsm(I);
2869   CopyToExportRegsIfNeeded(&I);
2870 
2871   // Retrieve successors.
2872   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
2873   Return->setInlineAsmBrDefaultTarget();
2874 
2875   // Update successor info.
2876   addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
2877   for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
2878     MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)];
2879     addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
2880     CallBrMBB->addInlineAsmBrIndirectTarget(Target);
2881   }
2882   CallBrMBB->normalizeSuccProbs();
2883 
2884   // Drop into default successor.
2885   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2886                           MVT::Other, getControlRoot(),
2887                           DAG.getBasicBlock(Return)));
2888 }
2889 
2890 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2891   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2892 }
2893 
2894 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2895   assert(FuncInfo.MBB->isEHPad() &&
2896          "Call to landingpad not in landing pad!");
2897 
2898   // If there aren't registers to copy the values into (e.g., during SjLj
2899   // exceptions), then don't bother to create these DAG nodes.
2900   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2901   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2902   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2903       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2904     return;
2905 
2906   // If the landingpad's return type is a token type, we don't create DAG nodes
2907   // for its exception pointer and selector value. The extraction of exception
2908   // pointer or selector value from token type landingpads is not currently
2909   // supported.
2910   if (LP.getType()->isTokenTy())
2911     return;
2912 
2913   SmallVector<EVT, 2> ValueVTs;
2914   SDLoc dl = getCurSDLoc();
2915   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2916   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2917 
2918   // Get the two live-in registers as SDValues. The physregs have already been
2919   // copied into virtual registers.
2920   SDValue Ops[2];
2921   if (FuncInfo.ExceptionPointerVirtReg) {
2922     Ops[0] = DAG.getZExtOrTrunc(
2923         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2924                            FuncInfo.ExceptionPointerVirtReg,
2925                            TLI.getPointerTy(DAG.getDataLayout())),
2926         dl, ValueVTs[0]);
2927   } else {
2928     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
2929   }
2930   Ops[1] = DAG.getZExtOrTrunc(
2931       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2932                          FuncInfo.ExceptionSelectorVirtReg,
2933                          TLI.getPointerTy(DAG.getDataLayout())),
2934       dl, ValueVTs[1]);
2935 
2936   // Merge into one.
2937   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2938                             DAG.getVTList(ValueVTs), Ops);
2939   setValue(&LP, Res);
2940 }
2941 
2942 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2943                                            MachineBasicBlock *Last) {
2944   // Update JTCases.
2945   for (unsigned i = 0, e = SL->JTCases.size(); i != e; ++i)
2946     if (SL->JTCases[i].first.HeaderBB == First)
2947       SL->JTCases[i].first.HeaderBB = Last;
2948 
2949   // Update BitTestCases.
2950   for (unsigned i = 0, e = SL->BitTestCases.size(); i != e; ++i)
2951     if (SL->BitTestCases[i].Parent == First)
2952       SL->BitTestCases[i].Parent = Last;
2953 
2954   // SelectionDAGISel::FinishBasicBlock will add PHI operands for the
2955   // successors of the fallthrough block. Here, we add PHI operands for the
2956   // successors of the INLINEASM_BR block itself.
2957   if (First->getFirstTerminator()->getOpcode() == TargetOpcode::INLINEASM_BR)
2958     for (std::pair<MachineInstr *, unsigned> &pair : FuncInfo.PHINodesToUpdate)
2959       if (First->isSuccessor(pair.first->getParent()))
2960         MachineInstrBuilder(*First->getParent(), pair.first)
2961             .addReg(pair.second)
2962             .addMBB(First);
2963 }
2964 
2965 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2966   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2967 
2968   // Update machine-CFG edges with unique successors.
2969   SmallSet<BasicBlock*, 32> Done;
2970   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2971     BasicBlock *BB = I.getSuccessor(i);
2972     bool Inserted = Done.insert(BB).second;
2973     if (!Inserted)
2974       continue;
2975 
2976     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2977     addSuccessorWithProb(IndirectBrMBB, Succ);
2978   }
2979   IndirectBrMBB->normalizeSuccProbs();
2980 
2981   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2982                           MVT::Other, getControlRoot(),
2983                           getValue(I.getAddress())));
2984 }
2985 
2986 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2987   if (!DAG.getTarget().Options.TrapUnreachable)
2988     return;
2989 
2990   // We may be able to ignore unreachable behind a noreturn call.
2991   if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
2992     const BasicBlock &BB = *I.getParent();
2993     if (&I != &BB.front()) {
2994       BasicBlock::const_iterator PredI =
2995         std::prev(BasicBlock::const_iterator(&I));
2996       if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2997         if (Call->doesNotReturn())
2998           return;
2999       }
3000     }
3001   }
3002 
3003   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3004 }
3005 
3006 void SelectionDAGBuilder::visitFSub(const User &I) {
3007   // -0.0 - X --> fneg
3008   Type *Ty = I.getType();
3009   if (isa<Constant>(I.getOperand(0)) &&
3010       I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
3011     SDValue Op2 = getValue(I.getOperand(1));
3012     setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
3013                              Op2.getValueType(), Op2));
3014     return;
3015   }
3016 
3017   visitBinary(I, ISD::FSUB);
3018 }
3019 
3020 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3021   SDNodeFlags Flags;
3022 
3023   SDValue Op = getValue(I.getOperand(0));
3024   SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3025                                     Op, Flags);
3026   setValue(&I, UnNodeValue);
3027 }
3028 
3029 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3030   SDNodeFlags Flags;
3031   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3032     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3033     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3034   }
3035   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
3036     Flags.setExact(ExactOp->isExact());
3037   }
3038 
3039   SDValue Op1 = getValue(I.getOperand(0));
3040   SDValue Op2 = getValue(I.getOperand(1));
3041   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3042                                      Op1, Op2, Flags);
3043   setValue(&I, BinNodeValue);
3044 }
3045 
3046 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3047   SDValue Op1 = getValue(I.getOperand(0));
3048   SDValue Op2 = getValue(I.getOperand(1));
3049 
3050   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3051       Op1.getValueType(), DAG.getDataLayout());
3052 
3053   // Coerce the shift amount to the right type if we can.
3054   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3055     unsigned ShiftSize = ShiftTy.getSizeInBits();
3056     unsigned Op2Size = Op2.getValueSizeInBits();
3057     SDLoc DL = getCurSDLoc();
3058 
3059     // If the operand is smaller than the shift count type, promote it.
3060     if (ShiftSize > Op2Size)
3061       Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
3062 
3063     // If the operand is larger than the shift count type but the shift
3064     // count type has enough bits to represent any shift value, truncate
3065     // it now. This is a common case and it exposes the truncate to
3066     // optimization early.
3067     else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
3068       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
3069     // Otherwise we'll need to temporarily settle for some other convenient
3070     // type.  Type legalization will make adjustments once the shiftee is split.
3071     else
3072       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
3073   }
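       // Worked example (illustrative, with an assumed target): shifting an i32
       // value by an i64 amount on a target whose shift-amount type is i8 takes
       // the second branch above, since ShiftSize (8) >= Log2_32_Ceil(64) == 6,
       // so the amount is truncated to i8; all in-range amounts (< 32) still
       // fit.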
3074 
3075   bool nuw = false;
3076   bool nsw = false;
3077   bool exact = false;
3078 
3079   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3080 
3081     if (const OverflowingBinaryOperator *OFBinOp =
3082             dyn_cast<const OverflowingBinaryOperator>(&I)) {
3083       nuw = OFBinOp->hasNoUnsignedWrap();
3084       nsw = OFBinOp->hasNoSignedWrap();
3085     }
3086     if (const PossiblyExactOperator *ExactOp =
3087             dyn_cast<const PossiblyExactOperator>(&I))
3088       exact = ExactOp->isExact();
3089   }
3090   SDNodeFlags Flags;
3091   Flags.setExact(exact);
3092   Flags.setNoSignedWrap(nsw);
3093   Flags.setNoUnsignedWrap(nuw);
3094   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3095                             Flags);
3096   setValue(&I, Res);
3097 }
3098 
3099 void SelectionDAGBuilder::visitSDiv(const User &I) {
3100   SDValue Op1 = getValue(I.getOperand(0));
3101   SDValue Op2 = getValue(I.getOperand(1));
3102 
3103   SDNodeFlags Flags;
3104   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3105                  cast<PossiblyExactOperator>(&I)->isExact());
3106   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3107                            Op2, Flags));
3108 }
3109 
3110 void SelectionDAGBuilder::visitICmp(const User &I) {
3111   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3112   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3113     predicate = IC->getPredicate();
3114   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3115     predicate = ICmpInst::Predicate(IC->getPredicate());
3116   SDValue Op1 = getValue(I.getOperand(0));
3117   SDValue Op2 = getValue(I.getOperand(1));
3118   ISD::CondCode Opcode = getICmpCondCode(predicate);
3119 
3120   auto &TLI = DAG.getTargetLoweringInfo();
3121   EVT MemVT =
3122       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3123 
3124   // If a pointer's DAG type is larger than its memory type then the DAG values
3125   // are zero-extended. This breaks signed comparisons so truncate back to the
3126   // underlying type before doing the compare.
3127   if (Op1.getValueType() != MemVT) {
3128     Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3129     Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3130   }
3131 
3132   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3133                                                         I.getType());
3134   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3135 }
3136 
3137 void SelectionDAGBuilder::visitFCmp(const User &I) {
3138   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3139   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3140     predicate = FC->getPredicate();
3141   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3142     predicate = FCmpInst::Predicate(FC->getPredicate());
3143   SDValue Op1 = getValue(I.getOperand(0));
3144   SDValue Op2 = getValue(I.getOperand(1));
3145 
3146   ISD::CondCode Condition = getFCmpCondCode(predicate);
3147   auto *FPMO = dyn_cast<FPMathOperator>(&I);
3148   if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
3149     Condition = getFCmpCodeWithoutNaN(Condition);
3150 
3151   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3152                                                         I.getType());
3153   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3154 }
3155 
3156 // Check whether the condition of the select is used only by select
3157 // instructions.
3158 static bool hasOnlySelectUsers(const Value *Cond) {
3159   return llvm::all_of(Cond->users(), [](const Value *V) {
3160     return isa<SelectInst>(V);
3161   });
3162 }
3163 
3164 void SelectionDAGBuilder::visitSelect(const User &I) {
3165   SmallVector<EVT, 4> ValueVTs;
3166   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3167                   ValueVTs);
3168   unsigned NumValues = ValueVTs.size();
3169   if (NumValues == 0) return;
3170 
3171   SmallVector<SDValue, 4> Values(NumValues);
3172   SDValue Cond     = getValue(I.getOperand(0));
3173   SDValue LHSVal   = getValue(I.getOperand(1));
3174   SDValue RHSVal   = getValue(I.getOperand(2));
3175   SmallVector<SDValue, 1> BaseOps(1, Cond);
3176   ISD::NodeType OpCode =
3177       Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3178 
3179   bool IsUnaryAbs = false;
3180 
3181   // Min/max matching is only viable if all output VTs are the same.
3182   if (is_splat(ValueVTs)) {
3183     EVT VT = ValueVTs[0];
3184     LLVMContext &Ctx = *DAG.getContext();
3185     auto &TLI = DAG.getTargetLoweringInfo();
3186 
3187     // We care about the legality of the operation after it has been type
3188     // legalized.
3189     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3190       VT = TLI.getTypeToTransformTo(Ctx, VT);
3191 
3192     // If the vselect is legal, assume we want to leave this as a vector setcc +
3193     // vselect. Otherwise, if this is going to be scalarized, we want to see if
3194     // min/max is legal on the scalar type.
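         // Illustrative example (not from the original code): for the IR
         //   %c = icmp slt i32 %a, %b
         //   %m = select i1 %c, i32 %a, i32 %b
         // matchSelectPattern below reports SPF_SMIN, and if ISD::SMIN is legal
         // or custom for i32 (and %c has no other non-select users) the select
         // is emitted as a single SMIN node instead of a setcc + select pair.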
3195     bool UseScalarMinMax = VT.isVector() &&
3196       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3197 
3198     Value *LHS, *RHS;
3199     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3200     ISD::NodeType Opc = ISD::DELETED_NODE;
3201     switch (SPR.Flavor) {
3202     case SPF_UMAX:    Opc = ISD::UMAX; break;
3203     case SPF_UMIN:    Opc = ISD::UMIN; break;
3204     case SPF_SMAX:    Opc = ISD::SMAX; break;
3205     case SPF_SMIN:    Opc = ISD::SMIN; break;
3206     case SPF_FMINNUM:
3207       switch (SPR.NaNBehavior) {
3208       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3209       case SPNB_RETURNS_NAN:   Opc = ISD::FMINIMUM; break;
3210       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3211       case SPNB_RETURNS_ANY: {
3212         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
3213           Opc = ISD::FMINNUM;
3214         else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT))
3215           Opc = ISD::FMINIMUM;
3216         else if (UseScalarMinMax)
3217           Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
3218             ISD::FMINNUM : ISD::FMINIMUM;
3219         break;
3220       }
3221       }
3222       break;
3223     case SPF_FMAXNUM:
3224       switch (SPR.NaNBehavior) {
3225       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3226       case SPNB_RETURNS_NAN:   Opc = ISD::FMAXIMUM; break;
3227       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3228       case SPNB_RETURNS_ANY:
3229 
3230         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
3231           Opc = ISD::FMAXNUM;
3232         else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT))
3233           Opc = ISD::FMAXIMUM;
3234         else if (UseScalarMinMax)
3235           Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
3236             ISD::FMAXNUM : ISD::FMAXIMUM;
3237         break;
3238       }
3239       break;
3240     case SPF_ABS:
3241       IsUnaryAbs = true;
3242       Opc = ISD::ABS;
3243       break;
3244     case SPF_NABS:
3245       // TODO: we need to produce sub(0, abs(X)).
3246     default: break;
3247     }
3248 
3249     if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3250         (TLI.isOperationLegalOrCustom(Opc, VT) ||
3251          (UseScalarMinMax &&
3252           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3253         // If the underlying comparison instruction is used by any other
3254         // instruction, the consumed instructions won't be destroyed, so it is
3255         // not profitable to convert to a min/max.
3256         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3257       OpCode = Opc;
3258       LHSVal = getValue(LHS);
3259       RHSVal = getValue(RHS);
3260       BaseOps.clear();
3261     }
3262 
3263     if (IsUnaryAbs) {
3264       OpCode = Opc;
3265       LHSVal = getValue(LHS);
3266       BaseOps.clear();
3267     }
3268   }
3269 
3270   if (IsUnaryAbs) {
3271     for (unsigned i = 0; i != NumValues; ++i) {
3272       Values[i] =
3273           DAG.getNode(OpCode, getCurSDLoc(),
3274                       LHSVal.getNode()->getValueType(LHSVal.getResNo() + i),
3275                       SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3276     }
3277   } else {
3278     for (unsigned i = 0; i != NumValues; ++i) {
3279       SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3280       Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3281       Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3282       Values[i] = DAG.getNode(
3283           OpCode, getCurSDLoc(),
3284           LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops);
3285     }
3286   }
3287 
3288   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3289                            DAG.getVTList(ValueVTs), Values));
3290 }
3291 
3292 void SelectionDAGBuilder::visitTrunc(const User &I) {
3293   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3294   SDValue N = getValue(I.getOperand(0));
3295   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3296                                                         I.getType());
3297   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3298 }
3299 
3300 void SelectionDAGBuilder::visitZExt(const User &I) {
3301   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3302   // ZExt also can't be a cast to bool for the same reason. So, nothing much to do.
3303   SDValue N = getValue(I.getOperand(0));
3304   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3305                                                         I.getType());
3306   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3307 }
3308 
3309 void SelectionDAGBuilder::visitSExt(const User &I) {
3310   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3311   // SExt also can't be a cast to bool for the same reason. So, nothing much to do.
3312   SDValue N = getValue(I.getOperand(0));
3313   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3314                                                         I.getType());
3315   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3316 }
3317 
3318 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3319   // FPTrunc is never a no-op cast, no need to check
3320   SDValue N = getValue(I.getOperand(0));
3321   SDLoc dl = getCurSDLoc();
3322   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3323   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3324   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3325                            DAG.getTargetConstant(
3326                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3327 }
3328 
3329 void SelectionDAGBuilder::visitFPExt(const User &I) {
3330   // FPExt is never a no-op cast, no need to check
3331   SDValue N = getValue(I.getOperand(0));
3332   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3333                                                         I.getType());
3334   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3335 }
3336 
3337 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3338   // FPToUI is never a no-op cast, no need to check
3339   SDValue N = getValue(I.getOperand(0));
3340   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3341                                                         I.getType());
3342   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3343 }
3344 
3345 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3346   // FPToSI is never a no-op cast, no need to check
3347   SDValue N = getValue(I.getOperand(0));
3348   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3349                                                         I.getType());
3350   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3351 }
3352 
3353 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3354   // UIToFP is never a no-op cast, no need to check
3355   SDValue N = getValue(I.getOperand(0));
3356   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3357                                                         I.getType());
3358   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3359 }
3360 
3361 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3362   // SIToFP is never a no-op cast, no need to check
3363   SDValue N = getValue(I.getOperand(0));
3364   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3365                                                         I.getType());
3366   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3367 }
3368 
3369 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3370   // What to do depends on the size of the integer and the size of the pointer.
3371   // We can either truncate, zero extend, or no-op, accordingly.
3372   SDValue N = getValue(I.getOperand(0));
3373   auto &TLI = DAG.getTargetLoweringInfo();
3374   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3375                                                         I.getType());
3376   EVT PtrMemVT =
3377       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3378   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3379   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3380   setValue(&I, N);
3381 }
3382 
3383 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3384   // What to do depends on the size of the integer and the size of the pointer.
3385   // We can either truncate, zero extend, or no-op, accordingly.
3386   SDValue N = getValue(I.getOperand(0));
3387   auto &TLI = DAG.getTargetLoweringInfo();
3388   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3389   EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3390   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3391   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3392   setValue(&I, N);
3393 }
3394 
3395 void SelectionDAGBuilder::visitBitCast(const User &I) {
3396   SDValue N = getValue(I.getOperand(0));
3397   SDLoc dl = getCurSDLoc();
3398   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3399                                                         I.getType());
3400 
3401   // BitCast assures us that source and destination are the same size so this is
3402   // either a BITCAST or a no-op.
3403   if (DestVT != N.getValueType())
3404     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3405                              DestVT, N)); // convert types.
3406   // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3407   // might fold any kind of constant expression to an integer constant and that
3408   // is not what we are looking for. Only recognize a bitcast of a genuine
3409   // constant integer as an opaque constant.
3410   else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3411     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3412                                  /*isOpaque*/true));
3413   else
3414     setValue(&I, N);            // noop cast.
3415 }
3416 
3417 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3418   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3419   const Value *SV = I.getOperand(0);
3420   SDValue N = getValue(SV);
3421   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3422 
3423   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3424   unsigned DestAS = I.getType()->getPointerAddressSpace();
3425 
3426   if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3427     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3428 
3429   setValue(&I, N);
3430 }
3431 
3432 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3433   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3434   SDValue InVec = getValue(I.getOperand(0));
3435   SDValue InVal = getValue(I.getOperand(1));
3436   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3437                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3438   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3439                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3440                            InVec, InVal, InIdx));
3441 }
3442 
3443 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3444   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3445   SDValue InVec = getValue(I.getOperand(0));
3446   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3447                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3448   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3449                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3450                            InVec, InIdx));
3451 }
3452 
3453 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3454   SDValue Src1 = getValue(I.getOperand(0));
3455   SDValue Src2 = getValue(I.getOperand(1));
3456   ArrayRef<int> Mask;
3457   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
3458     Mask = SVI->getShuffleMask();
3459   else
3460     Mask = cast<ConstantExpr>(I).getShuffleMask();
3461   SDLoc DL = getCurSDLoc();
3462   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3463   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3464   EVT SrcVT = Src1.getValueType();
3465 
3466   if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
3467       VT.isScalableVector()) {
3468     // Canonical splat form of first element of first input vector.
3469     SDValue FirstElt =
3470         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
3471                     DAG.getVectorIdxConstant(0, DL));
3472     setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
3473     return;
3474   }
3475 
3476   // For now, we only handle splats for scalable vectors.
3477   // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
3478   // for targets that support a SPLAT_VECTOR for non-scalable vector types.
3479   assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
3480 
3481   unsigned SrcNumElts = SrcVT.getVectorNumElements();
3482   unsigned MaskNumElts = Mask.size();
3483 
3484   if (SrcNumElts == MaskNumElts) {
3485     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3486     return;
3487   }
3488 
3489   // Normalize the shuffle vector since mask and vector length don't match.
3490   if (SrcNumElts < MaskNumElts) {
3491     // The mask is longer than the source vectors. We can concatenate vectors to
3492     // make the mask and vector lengths match.
3493 
3494     if (MaskNumElts % SrcNumElts == 0) {
3495       // Mask length is a multiple of the source vector length.
3496       // Check if the shuffle is some kind of concatenation of the input
3497       // vectors.
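           // Illustrative example (not part of the original code): with two
           // 4-element sources, the mask <0,1,2,3,4,5,6,7> picks Src1 and then
           // Src2 in whole SrcVT-sized pieces, so ConcatSrcs ends up as {0, 1}
           // and the shuffle is emitted as CONCAT_VECTORS(Src1, Src2).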
3498       unsigned NumConcat = MaskNumElts / SrcNumElts;
3499       bool IsConcat = true;
3500       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3501       for (unsigned i = 0; i != MaskNumElts; ++i) {
3502         int Idx = Mask[i];
3503         if (Idx < 0)
3504           continue;
3505         // Ensure the indices in each SrcVT sized piece are sequential and that
3506         // the same source is used for the whole piece.
3507         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3508             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3509              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3510           IsConcat = false;
3511           break;
3512         }
3513         // Remember which source this index came from.
3514         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3515       }
3516 
3517       // The shuffle is concatenating multiple vectors together. Just emit
3518       // a CONCAT_VECTORS operation.
3519       if (IsConcat) {
3520         SmallVector<SDValue, 8> ConcatOps;
3521         for (auto Src : ConcatSrcs) {
3522           if (Src < 0)
3523             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3524           else if (Src == 0)
3525             ConcatOps.push_back(Src1);
3526           else
3527             ConcatOps.push_back(Src2);
3528         }
3529         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3530         return;
3531       }
3532     }
3533 
3534     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3535     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3536     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3537                                     PaddedMaskNumElts);
3538 
3539     // Pad both vectors with undefs to make them the same length as the mask.
3540     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3541 
3542     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3543     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3544     MOps1[0] = Src1;
3545     MOps2[0] = Src2;
3546 
3547     Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3548     Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3549 
3550     // Readjust mask for new input vector length.
3551     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3552     for (unsigned i = 0; i != MaskNumElts; ++i) {
3553       int Idx = Mask[i];
3554       if (Idx >= (int)SrcNumElts)
3555         Idx -= SrcNumElts - PaddedMaskNumElts;
3556       MappedOps[i] = Idx;
3557     }
3558 
3559     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3560 
3561     // If the concatenated vector was padded, extract a subvector with the
3562     // correct number of elements.
3563     if (MaskNumElts != PaddedMaskNumElts)
3564       Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3565                            DAG.getVectorIdxConstant(0, DL));
3566 
3567     setValue(&I, Result);
3568     return;
3569   }
3570 
3571   if (SrcNumElts > MaskNumElts) {
3572     // Analyze the access pattern of the vector to see if we can extract
3573     // two subvectors and do the shuffle.
3574     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3575     bool CanExtract = true;
3576     for (int Idx : Mask) {
3577       unsigned Input = 0;
3578       if (Idx < 0)
3579         continue;
3580 
3581       if (Idx >= (int)SrcNumElts) {
3582         Input = 1;
3583         Idx -= SrcNumElts;
3584       }
3585 
3586       // If all the indices come from the same MaskNumElts sized portion of
3587       // the sources we can use extract. Also make sure the extract wouldn't
3588       // extract past the end of the source.
3589       int NewStartIdx = alignDown(Idx, MaskNumElts);
3590       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3591           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3592         CanExtract = false;
3593       // Make sure we always update StartIdx as we use it to track if all
3594       // elements are undef.
3595       StartIdx[Input] = NewStartIdx;
3596     }
3597 
3598     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3599       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3600       return;
3601     }
3602     if (CanExtract) {
3603       // Extract appropriate subvector and generate a vector shuffle
3604       for (unsigned Input = 0; Input < 2; ++Input) {
3605         SDValue &Src = Input == 0 ? Src1 : Src2;
3606         if (StartIdx[Input] < 0)
3607           Src = DAG.getUNDEF(VT);
3608         else {
3609           Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3610                             DAG.getVectorIdxConstant(StartIdx[Input], DL));
3611         }
3612       }
3613 
3614       // Calculate new mask.
3615       SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3616       for (int &Idx : MappedOps) {
3617         if (Idx >= (int)SrcNumElts)
3618           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3619         else if (Idx >= 0)
3620           Idx -= StartIdx[0];
3621       }
3622 
3623       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3624       return;
3625     }
3626   }
3627 
3628   // We can't use either concat vectors or extract subvectors, so fall back to
3629   // replacing the shuffle with a series of extract-element operations and a
3630   // build vector.
3631   EVT EltVT = VT.getVectorElementType();
3632   SmallVector<SDValue,8> Ops;
3633   for (int Idx : Mask) {
3634     SDValue Res;
3635 
3636     if (Idx < 0) {
3637       Res = DAG.getUNDEF(EltVT);
3638     } else {
3639       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3640       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3641 
3642       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
3643                         DAG.getVectorIdxConstant(Idx, DL));
3644     }
3645 
3646     Ops.push_back(Res);
3647   }
3648 
3649   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3650 }
3651 
3652 void SelectionDAGBuilder::visitInsertValue(const User &I) {
3653   ArrayRef<unsigned> Indices;
3654   if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
3655     Indices = IV->getIndices();
3656   else
3657     Indices = cast<ConstantExpr>(&I)->getIndices();
3658 
3659   const Value *Op0 = I.getOperand(0);
3660   const Value *Op1 = I.getOperand(1);
3661   Type *AggTy = I.getType();
3662   Type *ValTy = Op1->getType();
3663   bool IntoUndef = isa<UndefValue>(Op0);
3664   bool FromUndef = isa<UndefValue>(Op1);
3665 
3666   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3667 
3668   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3669   SmallVector<EVT, 4> AggValueVTs;
3670   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3671   SmallVector<EVT, 4> ValValueVTs;
3672   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3673 
3674   unsigned NumAggValues = AggValueVTs.size();
3675   unsigned NumValValues = ValValueVTs.size();
3676   SmallVector<SDValue, 4> Values(NumAggValues);
3677 
3678   // Ignore an insertvalue that produces an empty object
3679   if (!NumAggValues) {
3680     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3681     return;
3682   }
3683 
3684   SDValue Agg = getValue(Op0);
3685   unsigned i = 0;
3686   // Copy the beginning value(s) from the original aggregate.
3687   for (; i != LinearIndex; ++i)
3688     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3689                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3690   // Copy values from the inserted value(s).
3691   if (NumValValues) {
3692     SDValue Val = getValue(Op1);
3693     for (; i != LinearIndex + NumValValues; ++i)
3694       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3695                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3696   }
3697   // Copy remaining value(s) from the original aggregate.
3698   for (; i != NumAggValues; ++i)
3699     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3700                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3701 
3702   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3703                            DAG.getVTList(AggValueVTs), Values));
3704 }
3705 
3706 void SelectionDAGBuilder::visitExtractValue(const User &I) {
3707   ArrayRef<unsigned> Indices;
3708   if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
3709     Indices = EV->getIndices();
3710   else
3711     Indices = cast<ConstantExpr>(&I)->getIndices();
3712 
3713   const Value *Op0 = I.getOperand(0);
3714   Type *AggTy = Op0->getType();
3715   Type *ValTy = I.getType();
3716   bool OutOfUndef = isa<UndefValue>(Op0);
3717 
3718   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3719 
3720   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3721   SmallVector<EVT, 4> ValValueVTs;
3722   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3723 
3724   unsigned NumValValues = ValValueVTs.size();
3725 
3726   // Ignore an extractvalue that produces an empty object
3727   if (!NumValValues) {
3728     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3729     return;
3730   }
3731 
3732   SmallVector<SDValue, 4> Values(NumValValues);
3733 
3734   SDValue Agg = getValue(Op0);
3735   // Copy out the selected value(s).
3736   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3737     Values[i - LinearIndex] =
3738       OutOfUndef ?
3739         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3740         SDValue(Agg.getNode(), Agg.getResNo() + i);
3741 
3742   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3743                            DAG.getVTList(ValValueVTs), Values));
3744 }
3745 
3746 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3747   Value *Op0 = I.getOperand(0);
3748   // Note that the pointer operand may be a vector of pointers. Take the scalar
3749   // element which holds a pointer.
3750   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3751   SDValue N = getValue(Op0);
3752   SDLoc dl = getCurSDLoc();
3753   auto &TLI = DAG.getTargetLoweringInfo();
3754   MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
3755   MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
3756 
3757   // Normalize a vector GEP - all scalar operands should be converted to
3758   // splat vectors.
3759   bool IsVectorGEP = I.getType()->isVectorTy();
3760   ElementCount VectorElementCount =
3761       IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
3762                   : ElementCount(0, false);
3763 
3764   if (IsVectorGEP && !N.getValueType().isVector()) {
3765     LLVMContext &Context = *DAG.getContext();
3766     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
3767     if (VectorElementCount.Scalable)
3768       N = DAG.getSplatVector(VT, dl, N);
3769     else
3770       N = DAG.getSplatBuildVector(VT, dl, N);
3771   }
3772 
3773   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3774        GTI != E; ++GTI) {
3775     const Value *Idx = GTI.getOperand();
3776     if (StructType *StTy = GTI.getStructTypeOrNull()) {
3777       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3778       if (Field) {
3779         // N = N + Offset
3780         uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3781 
3782         // In an inbounds GEP with an offset that is nonnegative even when
3783         // interpreted as signed, assume there is no unsigned overflow.
3784         SDNodeFlags Flags;
3785         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3786           Flags.setNoUnsignedWrap(true);
3787 
3788         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3789                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3790       }
3791     } else {
3792       // IdxSize is the width of the arithmetic according to IR semantics.
3793       // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
3794       // (and fix up the result later).
3795       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
3796       MVT IdxTy = MVT::getIntegerVT(IdxSize);
3797       TypeSize ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
3798       // We intentionally mask away the high bits here; ElementSize may not
3799       // fit in IdxTy.
3800       APInt ElementMul(IdxSize, ElementSize.getKnownMinSize());
3801       bool ElementScalable = ElementSize.isScalable();
3802 
3803       // If this is a scalar constant or a splat vector of constants,
3804       // handle it quickly.
3805       const auto *C = dyn_cast<Constant>(Idx);
3806       if (C && isa<VectorType>(C->getType()))
3807         C = C->getSplatValue();
3808 
3809       const auto *CI = dyn_cast_or_null<ConstantInt>(C);
3810       if (CI && CI->isZero())
3811         continue;
3812       if (CI && !ElementScalable) {
3813         APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
3814         LLVMContext &Context = *DAG.getContext();
3815         SDValue OffsVal;
3816         if (IsVectorGEP)
3817           OffsVal = DAG.getConstant(
3818               Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
3819         else
3820           OffsVal = DAG.getConstant(Offs, dl, IdxTy);
3821 
3822         // In an inbounds GEP with an offset that is nonnegative even when
3823         // interpreted as signed, assume there is no unsigned overflow.
3824         SDNodeFlags Flags;
3825         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3826           Flags.setNoUnsignedWrap(true);
3827 
3828         OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
3829 
3830         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3831         continue;
3832       }
3833 
3834       // N = N + Idx * ElementMul;
3835       SDValue IdxN = getValue(Idx);
3836 
3837       if (!IdxN.getValueType().isVector() && IsVectorGEP) {
3838         EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(),
3839                                   VectorElementCount);
3840         if (VectorElementCount.Scalable)
3841           IdxN = DAG.getSplatVector(VT, dl, IdxN);
3842         else
3843           IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3844       }
3845 
3846       // If the index is smaller or larger than intptr_t, truncate or extend
3847       // it.
3848       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3849 
3850       if (ElementScalable) {
3851         EVT VScaleTy = N.getValueType().getScalarType();
3852         SDValue VScale = DAG.getNode(
3853             ISD::VSCALE, dl, VScaleTy,
3854             DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
3855         if (IsVectorGEP)
3856           VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
3857         IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
3858       } else {
3859         // If this is a multiply by a power of two, turn it into a shl
3860         // immediately.  This is a very common case.
3861         if (ElementMul != 1) {
3862           if (ElementMul.isPowerOf2()) {
3863             unsigned Amt = ElementMul.logBase2();
3864             IdxN = DAG.getNode(ISD::SHL, dl,
3865                                N.getValueType(), IdxN,
3866                                DAG.getConstant(Amt, dl, IdxN.getValueType()));
3867           } else {
3868             SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
3869                                             IdxN.getValueType());
3870             IdxN = DAG.getNode(ISD::MUL, dl,
3871                                N.getValueType(), IdxN, Scale);
3872           }
3873         }
3874       }
3875 
3876       N = DAG.getNode(ISD::ADD, dl,
3877                       N.getValueType(), N, IdxN);
3878     }
3879   }
3880 
3881   if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
3882     N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
3883 
3884   setValue(&I, N);
3885 }
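// For illustration (not part of the lowering itself): with a typical 64-bit
// DataLayout, a scalar GEP with constant indices such as
//
//   %p = getelementptr {i32, i64}, {i32, i64}* %base, i64 1, i32 1
//
// is folded by the loop above into plain pointer arithmetic:
//
//   N = %base
//   N = ADD N, 16    ; array index 1 * alloc size of {i32, i64} (16 bytes)
//   N = ADD N, 8     ; field 1 starts at byte offset 8 (StructLayout)
//
// with the nuw flag set on both adds when the GEP is inbounds, since both
// offsets are nonnegative. The exact constants depend on the DataLayout.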
3886 
3887 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3888   // If this is a fixed sized alloca in the entry block of the function,
3889   // allocate it statically on the stack.
3890   if (FuncInfo.StaticAllocaMap.count(&I))
3891     return;   // getValue will auto-populate this.
3892 
3893   SDLoc dl = getCurSDLoc();
3894   Type *Ty = I.getAllocatedType();
3895   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3896   auto &DL = DAG.getDataLayout();
3897   uint64_t TySize = DL.getTypeAllocSize(Ty);
3898   MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
3899 
3900   SDValue AllocSize = getValue(I.getArraySize());
3901 
3902   EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
3903   if (AllocSize.getValueType() != IntPtr)
3904     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
3905 
3906   AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
3907                           AllocSize,
3908                           DAG.getConstant(TySize, dl, IntPtr));
3909 
3910   // Handle alignment.  If the requested alignment is less than or equal to
3911   // the stack alignment, ignore it.  If the size is greater than or equal to
3912   // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
3913   Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
3914   if (*Alignment <= StackAlign)
3915     Alignment = None;
3916 
3917   const uint64_t StackAlignMask = StackAlign.value() - 1U;
3918   // Round the size of the allocation up to the stack alignment size
3919   // by adding SA-1 to the size. This doesn't overflow because we're computing
3920   // an address inside an alloca.
3921   SDNodeFlags Flags;
3922   Flags.setNoUnsignedWrap(true);
3923   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
3924                           DAG.getConstant(StackAlignMask, dl, IntPtr), Flags);
3925 
3926   // Mask out the low bits for alignment purposes.
3927   AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
3928                           DAG.getConstant(~StackAlignMask, dl, IntPtr));
3929 
3930   SDValue Ops[] = {
3931       getRoot(), AllocSize,
3932       DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
3933   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3934   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
3935   setValue(&I, DSA);
3936   DAG.setRoot(DSA.getValue(1));
3937 
3938   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3939 }
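// Worked example (illustrative): with a 16-byte stack alignment, a dynamic
// alloca of 40 bytes is rounded up as
//
//   AllocSize = (40 + 15) & ~15 = 48
//
// before being handed to DYNAMIC_STACKALLOC. The add cannot wrap because the
// result is still an address inside the alloca, which is why the nuw flag is
// set above.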
3940 
3941 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3942   if (I.isAtomic())
3943     return visitAtomicLoad(I);
3944 
3945   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3946   const Value *SV = I.getOperand(0);
3947   if (TLI.supportSwiftError()) {
3948     // Swifterror values can come from either a function parameter with
3949     // swifterror attribute or an alloca with swifterror attribute.
3950     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
3951       if (Arg->hasSwiftErrorAttr())
3952         return visitLoadFromSwiftError(I);
3953     }
3954 
3955     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3956       if (Alloca->isSwiftError())
3957         return visitLoadFromSwiftError(I);
3958     }
3959   }
3960 
3961   SDValue Ptr = getValue(SV);
3962 
3963   Type *Ty = I.getType();
3964   Align Alignment = I.getAlign();
3965 
3966   AAMDNodes AAInfo;
3967   I.getAAMetadata(AAInfo);
3968   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3969 
3970   SmallVector<EVT, 4> ValueVTs, MemVTs;
3971   SmallVector<uint64_t, 4> Offsets;
3972   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
3973   unsigned NumValues = ValueVTs.size();
3974   if (NumValues == 0)
3975     return;
3976 
3977   bool isVolatile = I.isVolatile();
3978 
3979   SDValue Root;
3980   bool ConstantMemory = false;
3981   if (isVolatile)
3982     // Serialize volatile loads with other side effects.
3983     Root = getRoot();
3984   else if (NumValues > MaxParallelChains)
3985     Root = getMemoryRoot();
3986   else if (AA &&
3987            AA->pointsToConstantMemory(MemoryLocation(
3988                SV,
3989                LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
3990                AAInfo))) {
3991     // Do not serialize (non-volatile) loads of constant memory with anything.
3992     Root = DAG.getEntryNode();
3993     ConstantMemory = true;
3994   } else {
3995     // Do not serialize non-volatile loads against each other.
3996     Root = DAG.getRoot();
3997   }
3998 
3999   SDLoc dl = getCurSDLoc();
4000 
4001   if (isVolatile)
4002     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4003 
4004   // An aggregate load cannot wrap around the address space, so offsets to its
4005   // parts don't wrap either.
4006   SDNodeFlags Flags;
4007   Flags.setNoUnsignedWrap(true);
4008 
4009   SmallVector<SDValue, 4> Values(NumValues);
4010   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4011   EVT PtrVT = Ptr.getValueType();
4012 
4013   MachineMemOperand::Flags MMOFlags
4014     = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());
4015 
4016   unsigned ChainI = 0;
4017   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4018     // Serializing loads here may result in excessive register pressure, and
4019     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4020     // could recover a bit by hoisting nodes upward in the chain by recognizing
4021     // they are side-effect free or do not alias. The optimizer should really
4022     // avoid this case by converting large object/array copies to llvm.memcpy
4023     // (MaxParallelChains should always remain as a failsafe).
4024     if (ChainI == MaxParallelChains) {
4025       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4026       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4027                                   makeArrayRef(Chains.data(), ChainI));
4028       Root = Chain;
4029       ChainI = 0;
4030     }
4031     SDValue A = DAG.getNode(ISD::ADD, dl,
4032                             PtrVT, Ptr,
4033                             DAG.getConstant(Offsets[i], dl, PtrVT),
4034                             Flags);
4035 
4036     SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A,
4037                             MachinePointerInfo(SV, Offsets[i]), Alignment,
4038                             MMOFlags, AAInfo, Ranges);
4039     Chains[ChainI] = L.getValue(1);
4040 
4041     if (MemVTs[i] != ValueVTs[i])
4042       L = DAG.getZExtOrTrunc(L, dl, ValueVTs[i]);
4043 
4044     Values[i] = L;
4045   }
4046 
4047   if (!ConstantMemory) {
4048     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4049                                 makeArrayRef(Chains.data(), ChainI));
4050     if (isVolatile)
4051       DAG.setRoot(Chain);
4052     else
4053       PendingLoads.push_back(Chain);
4054   }
4055 
4056   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4057                            DAG.getVTList(ValueVTs), Values));
4058 }
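// Illustrative sketch (assuming a typical 64-bit DataLayout): loading a
// first-class aggregate such as
//
//   %v = load {i32, i64}, {i32, i64}* %p
//
// produces one scalar load per member, at byte offsets 0 and 8, with the
// chains grouped into TokenFactors of at most MaxParallelChains and the
// loaded values combined into a single ISD::MERGE_VALUES node.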
4059 
4060 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4061   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4062          "call visitStoreToSwiftError when backend supports swifterror");
4063 
4064   SmallVector<EVT, 4> ValueVTs;
4065   SmallVector<uint64_t, 4> Offsets;
4066   const Value *SrcV = I.getOperand(0);
4067   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4068                   SrcV->getType(), ValueVTs, &Offsets);
4069   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4070          "expect a single EVT for swifterror");
4071 
4072   SDValue Src = getValue(SrcV);
4073   // Create a virtual register, then update the virtual register.
4074   Register VReg =
4075       SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4076   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4077   // Chain can be getRoot or getControlRoot.
4078   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4079                                       SDValue(Src.getNode(), Src.getResNo()));
4080   DAG.setRoot(CopyNode);
4081 }
4082 
4083 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4084   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4085          "call visitLoadFromSwiftError when backend supports swifterror");
4086 
4087   assert(!I.isVolatile() &&
4088          !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4089          !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4090          "Volatile, non-temporal and invariant loads are not supported for load_from_swift_error");
4091 
4092   const Value *SV = I.getOperand(0);
4093   Type *Ty = I.getType();
4094   AAMDNodes AAInfo;
4095   I.getAAMetadata(AAInfo);
4096   assert(
4097       (!AA ||
4098        !AA->pointsToConstantMemory(MemoryLocation(
4099            SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4100            AAInfo))) &&
4101       "load_from_swift_error should not be constant memory");
4102 
4103   SmallVector<EVT, 4> ValueVTs;
4104   SmallVector<uint64_t, 4> Offsets;
4105   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4106                   ValueVTs, &Offsets);
4107   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4108          "expect a single EVT for swifterror");
4109 
4110   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4111   SDValue L = DAG.getCopyFromReg(
4112       getRoot(), getCurSDLoc(),
4113       SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4114 
4115   setValue(&I, L);
4116 }
4117 
4118 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4119   if (I.isAtomic())
4120     return visitAtomicStore(I);
4121 
4122   const Value *SrcV = I.getOperand(0);
4123   const Value *PtrV = I.getOperand(1);
4124 
4125   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4126   if (TLI.supportSwiftError()) {
4127     // Swifterror values can come from either a function parameter with
4128     // swifterror attribute or an alloca with swifterror attribute.
4129     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4130       if (Arg->hasSwiftErrorAttr())
4131         return visitStoreToSwiftError(I);
4132     }
4133 
4134     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4135       if (Alloca->isSwiftError())
4136         return visitStoreToSwiftError(I);
4137     }
4138   }
4139 
4140   SmallVector<EVT, 4> ValueVTs, MemVTs;
4141   SmallVector<uint64_t, 4> Offsets;
4142   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4143                   SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4144   unsigned NumValues = ValueVTs.size();
4145   if (NumValues == 0)
4146     return;
4147 
4148   // Get the lowered operands. Note that we do this after
4149   // checking if NumValues is zero, because with zero results
4150   // the operands won't have values in the map.
4151   SDValue Src = getValue(SrcV);
4152   SDValue Ptr = getValue(PtrV);
4153 
4154   SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4155   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4156   SDLoc dl = getCurSDLoc();
4157   Align Alignment = I.getAlign();
4158   AAMDNodes AAInfo;
4159   I.getAAMetadata(AAInfo);
4160 
4161   auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4162 
4163   // An aggregate store cannot wrap around the address space, so offsets to its
4164   // parts don't wrap either.
4165   SDNodeFlags Flags;
4166   Flags.setNoUnsignedWrap(true);
4167 
4168   unsigned ChainI = 0;
4169   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4170     // See visitLoad comments.
4171     if (ChainI == MaxParallelChains) {
4172       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4173                                   makeArrayRef(Chains.data(), ChainI));
4174       Root = Chain;
4175       ChainI = 0;
4176     }
4177     SDValue Add = DAG.getMemBasePlusOffset(Ptr, Offsets[i], dl, Flags);
4178     SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4179     if (MemVTs[i] != ValueVTs[i])
4180       Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4181     SDValue St =
4182         DAG.getStore(Root, dl, Val, Add, MachinePointerInfo(PtrV, Offsets[i]),
4183                      Alignment, MMOFlags, AAInfo);
4184     Chains[ChainI] = St;
4185   }
4186 
4187   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4188                                   makeArrayRef(Chains.data(), ChainI));
4189   DAG.setRoot(StoreNode);
4190 }
4191 
4192 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4193                                            bool IsCompressing) {
4194   SDLoc sdl = getCurSDLoc();
4195 
4196   auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4197                                MaybeAlign &Alignment) {
4198     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4199     Src0 = I.getArgOperand(0);
4200     Ptr = I.getArgOperand(1);
4201     Alignment =
4202         MaybeAlign(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
4203     Mask = I.getArgOperand(3);
4204   };
4205   auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4206                                     MaybeAlign &Alignment) {
4207     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4208     Src0 = I.getArgOperand(0);
4209     Ptr = I.getArgOperand(1);
4210     Mask = I.getArgOperand(2);
4211     Alignment = None;
4212   };
4213 
4214   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4215   MaybeAlign Alignment;
4216   if (IsCompressing)
4217     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4218   else
4219     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4220 
4221   SDValue Ptr = getValue(PtrOperand);
4222   SDValue Src0 = getValue(Src0Operand);
4223   SDValue Mask = getValue(MaskOperand);
4224   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4225 
4226   EVT VT = Src0.getValueType();
4227   if (!Alignment)
4228     Alignment = DAG.getEVTAlign(VT);
4229 
4230   AAMDNodes AAInfo;
4231   I.getAAMetadata(AAInfo);
4232 
4233   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4234       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
4235       // TODO: Make MachineMemOperands aware of scalable
4236       // vectors.
4237       VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo);
4238   SDValue StoreNode =
4239       DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4240                          ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4241   DAG.setRoot(StoreNode);
4242   setValue(&I, StoreNode);
4243 }
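// For reference (a sketch of the intrinsic forms matched by the two lambdas
// above; operand order as documented there, type mangling may vary):
//
//   call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> %val,
//                                              <8 x i32>* %ptr, i32 4,
//                                              <8 x i1> %mask)
//   call void @llvm.masked.compressstore.v8i32(<8 x i32> %val, i32* %ptr,
//                                              <8 x i1> %mask)
//
// Only the plain masked store carries an explicit alignment immediate, which
// is why the compressing variant leaves Alignment unset and falls back to
// DAG.getEVTAlign(VT).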
4244 
4245 // Get a uniform base for the Gather/Scatter intrinsic.
4246 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4247 // We try to represent it as a base pointer + vector of indices.
4248 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4249 // The first operand of the GEP may be a single pointer or a vector of pointers.
4250 // Example:
4251 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4252 //  or
4253 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
4254 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4255 //
4256 // When the first GEP operand is a single pointer, it is the uniform base we
4257 // are looking for. If the first operand of the GEP is a splat vector, we
4258 // extract the splat value and use it as the uniform base.
4259 // In all other cases the function returns 'false'.
4260 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4261                            ISD::MemIndexType &IndexType, SDValue &Scale,
4262                            SelectionDAGBuilder *SDB, const BasicBlock *CurBB) {
4263   SelectionDAG& DAG = SDB->DAG;
4264   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4265   const DataLayout &DL = DAG.getDataLayout();
4266 
4267   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4268 
4269   // Handle splat constant pointer.
4270   if (auto *C = dyn_cast<Constant>(Ptr)) {
4271     C = C->getSplatValue();
4272     if (!C)
4273       return false;
4274 
4275     Base = SDB->getValue(C);
4276 
4277     unsigned NumElts = cast<VectorType>(Ptr->getType())->getNumElements();
4278     EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4279     Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4280     IndexType = ISD::SIGNED_SCALED;
4281     Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4282     return true;
4283   }
4284 
4285   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4286   if (!GEP || GEP->getParent() != CurBB)
4287     return false;
4288 
4289   if (GEP->getNumOperands() != 2)
4290     return false;
4291 
4292   const Value *BasePtr = GEP->getPointerOperand();
4293   const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4294 
4295   // Make sure the base is scalar and the index is a vector.
4296   if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4297     return false;
4298 
4299   Base = SDB->getValue(BasePtr);
4300   Index = SDB->getValue(IndexVal);
4301   IndexType = ISD::SIGNED_SCALED;
4302   Scale = DAG.getTargetConstant(
4303               DL.getTypeAllocSize(GEP->getResultElementType()),
4304               SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4305   return true;
4306 }
4307 
4308 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4309   SDLoc sdl = getCurSDLoc();
4310 
4311   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4312   const Value *Ptr = I.getArgOperand(1);
4313   SDValue Src0 = getValue(I.getArgOperand(0));
4314   SDValue Mask = getValue(I.getArgOperand(3));
4315   EVT VT = Src0.getValueType();
4316   MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
4317   if (!Alignment)
4318     Alignment = DAG.getEVTAlign(VT);
4319   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4320 
4321   AAMDNodes AAInfo;
4322   I.getAAMetadata(AAInfo);
4323 
4324   SDValue Base;
4325   SDValue Index;
4326   ISD::MemIndexType IndexType;
4327   SDValue Scale;
4328   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4329                                     I.getParent());
4330 
4331   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4332   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4333       MachinePointerInfo(AS), MachineMemOperand::MOStore,
4334       // TODO: Make MachineMemOperands aware of scalable
4335       // vectors.
4336       MemoryLocation::UnknownSize, *Alignment, AAInfo);
4337   if (!UniformBase) {
4338     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4339     Index = getValue(Ptr);
4340     IndexType = ISD::SIGNED_SCALED;
4341     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4342   }
4343   SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4344   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4345                                          Ops, MMO, IndexType);
4346   DAG.setRoot(Scatter);
4347   setValue(&I, Scatter);
4348 }
4349 
4350 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4351   SDLoc sdl = getCurSDLoc();
4352 
4353   auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4354                               MaybeAlign &Alignment) {
4355     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4356     Ptr = I.getArgOperand(0);
4357     Alignment =
4358         MaybeAlign(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
4359     Mask = I.getArgOperand(2);
4360     Src0 = I.getArgOperand(3);
4361   };
4362   auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4363                                  MaybeAlign &Alignment) {
4364     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4365     Ptr = I.getArgOperand(0);
4366     Alignment = None;
4367     Mask = I.getArgOperand(1);
4368     Src0 = I.getArgOperand(2);
4369   };
4370 
4371   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4372   MaybeAlign Alignment;
4373   if (IsExpanding)
4374     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4375   else
4376     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4377 
4378   SDValue Ptr = getValue(PtrOperand);
4379   SDValue Src0 = getValue(Src0Operand);
4380   SDValue Mask = getValue(MaskOperand);
4381   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4382 
4383   EVT VT = Src0.getValueType();
4384   if (!Alignment)
4385     Alignment = DAG.getEVTAlign(VT);
4386 
4387   AAMDNodes AAInfo;
4388   I.getAAMetadata(AAInfo);
4389   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4390 
4391   // Do not serialize masked loads of constant memory with anything.
4392   MemoryLocation ML;
4393   if (VT.isScalableVector())
4394     ML = MemoryLocation(PtrOperand);
4395   else
4396     ML = MemoryLocation(PtrOperand, LocationSize::precise(
4397                            DAG.getDataLayout().getTypeStoreSize(I.getType())),
4398                            AAInfo);
4399   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4400 
4401   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4402 
4403   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4404       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
4405       // TODO: Make MachineMemOperands aware of scalable
4406       // vectors.
4407       VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo, Ranges);
4408 
4409   SDValue Load =
4410       DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4411                         ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4412   if (AddToChain)
4413     PendingLoads.push_back(Load.getValue(1));
4414   setValue(&I, Load);
4415 }
4416 
4417 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4418   SDLoc sdl = getCurSDLoc();
4419 
4420   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4421   const Value *Ptr = I.getArgOperand(0);
4422   SDValue Src0 = getValue(I.getArgOperand(3));
4423   SDValue Mask = getValue(I.getArgOperand(2));
4424 
4425   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4426   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4427   MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
4428   if (!Alignment)
4429     Alignment = DAG.getEVTAlign(VT);
4430 
4431   AAMDNodes AAInfo;
4432   I.getAAMetadata(AAInfo);
4433   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4434 
4435   SDValue Root = DAG.getRoot();
4436   SDValue Base;
4437   SDValue Index;
4438   ISD::MemIndexType IndexType;
4439   SDValue Scale;
4440   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4441                                     I.getParent());
4442   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4443   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4444       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
4445       // TODO: Make MachineMemOperands aware of scalable
4446       // vectors.
4447       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
4448 
4449   if (!UniformBase) {
4450     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4451     Index = getValue(Ptr);
4452     IndexType = ISD::SIGNED_SCALED;
4453     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4454   }
4455   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4456   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4457                                        Ops, MMO, IndexType);
4458 
4459   PendingLoads.push_back(Gather.getValue(1));
4460   setValue(&I, Gather);
4461 }
4462 
4463 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4464   SDLoc dl = getCurSDLoc();
4465   AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4466   AtomicOrdering FailureOrdering = I.getFailureOrdering();
4467   SyncScope::ID SSID = I.getSyncScopeID();
4468 
4469   SDValue InChain = getRoot();
4470 
4471   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4472   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4473 
4474   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4475   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4476 
4477   MachineFunction &MF = DAG.getMachineFunction();
4478   MachineMemOperand *MMO = MF.getMachineMemOperand(
4479       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4480       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
4481       FailureOrdering);
4482 
4483   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4484                                    dl, MemVT, VTs, InChain,
4485                                    getValue(I.getPointerOperand()),
4486                                    getValue(I.getCompareOperand()),
4487                                    getValue(I.getNewValOperand()), MMO);
4488 
4489   SDValue OutChain = L.getValue(2);
4490 
4491   setValue(&I, L);
4492   DAG.setRoot(OutChain);
4493 }
4494 
4495 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4496   SDLoc dl = getCurSDLoc();
4497   ISD::NodeType NT;
4498   switch (I.getOperation()) {
4499   default: llvm_unreachable("Unknown atomicrmw operation");
4500   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4501   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
4502   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
4503   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
4504   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4505   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
4506   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
4507   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
4508   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
4509   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4510   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4511   case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4512   case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4513   }
4514   AtomicOrdering Ordering = I.getOrdering();
4515   SyncScope::ID SSID = I.getSyncScopeID();
4516 
4517   SDValue InChain = getRoot();
4518 
4519   auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4520   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4521   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4522 
4523   MachineFunction &MF = DAG.getMachineFunction();
4524   MachineMemOperand *MMO = MF.getMachineMemOperand(
4525       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4526       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
4527 
4528   SDValue L =
4529     DAG.getAtomic(NT, dl, MemVT, InChain,
4530                   getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4531                   MMO);
4532 
4533   SDValue OutChain = L.getValue(1);
4534 
4535   setValue(&I, L);
4536   DAG.setRoot(OutChain);
4537 }
4538 
4539 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4540   SDLoc dl = getCurSDLoc();
4541   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4542   SDValue Ops[3];
4543   Ops[0] = getRoot();
4544   Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4545                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4546   Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4547                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4548   DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4549 }
4550 
4551 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4552   SDLoc dl = getCurSDLoc();
4553   AtomicOrdering Order = I.getOrdering();
4554   SyncScope::ID SSID = I.getSyncScopeID();
4555 
4556   SDValue InChain = getRoot();
4557 
4558   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4559   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4560   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4561 
4562   if (!TLI.supportsUnalignedAtomics() &&
4563       I.getAlignment() < MemVT.getSizeInBits() / 8)
4564     report_fatal_error("Cannot generate unaligned atomic load");
4565 
4566   auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());
4567 
4568   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4569       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4570       I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
4571 
4572   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4573 
4574   SDValue Ptr = getValue(I.getPointerOperand());
4575 
4576   if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
4577     // TODO: Once this is better exercised by tests, it should be merged with
4578     // the normal path for loads to prevent future divergence.
4579     SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
4580     if (MemVT != VT)
4581       L = DAG.getPtrExtOrTrunc(L, dl, VT);
4582 
4583     setValue(&I, L);
4584     SDValue OutChain = L.getValue(1);
4585     if (!I.isUnordered())
4586       DAG.setRoot(OutChain);
4587     else
4588       PendingLoads.push_back(OutChain);
4589     return;
4590   }
4591 
4592   SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4593                             Ptr, MMO);
4594 
4595   SDValue OutChain = L.getValue(1);
4596   if (MemVT != VT)
4597     L = DAG.getPtrExtOrTrunc(L, dl, VT);
4598 
4599   setValue(&I, L);
4600   DAG.setRoot(OutChain);
4601 }
4602 
4603 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4604   SDLoc dl = getCurSDLoc();
4605 
4606   AtomicOrdering Ordering = I.getOrdering();
4607   SyncScope::ID SSID = I.getSyncScopeID();
4608 
4609   SDValue InChain = getRoot();
4610 
4611   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4612   EVT MemVT =
4613       TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4614 
4615   if (I.getAlignment() < MemVT.getSizeInBits() / 8)
4616     report_fatal_error("Cannot generate unaligned atomic store");
4617 
4618   auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4619 
4620   MachineFunction &MF = DAG.getMachineFunction();
4621   MachineMemOperand *MMO = MF.getMachineMemOperand(
4622       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4623       I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
4624 
4625   SDValue Val = getValue(I.getValueOperand());
4626   if (Val.getValueType() != MemVT)
4627     Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4628   SDValue Ptr = getValue(I.getPointerOperand());
4629 
4630   if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
4631     // TODO: Once this is better exercised by tests, it should be merged with
4632     // the normal path for stores to prevent future divergence.
4633     SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
4634     DAG.setRoot(S);
4635     return;
4636   }
4637   SDValue OutChain = DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain,
4638                                    Ptr, Val, MMO);
4639 
4640 
4641   DAG.setRoot(OutChain);
4642 }
4643 
4644 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4645 /// node.
4646 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4647                                                unsigned Intrinsic) {
4648   // Ignore the callsite's attributes. A specific call site may be marked with
4649   // readnone, but the lowering code will expect the chain based on the
4650   // definition.
4651   const Function *F = I.getCalledFunction();
4652   bool HasChain = !F->doesNotAccessMemory();
4653   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4654 
4655   // Build the operand list.
4656   SmallVector<SDValue, 8> Ops;
4657   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4658     if (OnlyLoad) {
4659       // We don't need to serialize loads against other loads.
4660       Ops.push_back(DAG.getRoot());
4661     } else {
4662       Ops.push_back(getRoot());
4663     }
4664   }
4665 
4666   // Info is set by getTgtMemIntrinsic.
4667   TargetLowering::IntrinsicInfo Info;
4668   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4669   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4670                                                DAG.getMachineFunction(),
4671                                                Intrinsic);
4672 
4673   // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4674   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4675       Info.opc == ISD::INTRINSIC_W_CHAIN)
4676     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4677                                         TLI.getPointerTy(DAG.getDataLayout())));
4678 
4679   // Add all operands of the call to the operand list.
4680   for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4681     const Value *Arg = I.getArgOperand(i);
4682     if (!I.paramHasAttr(i, Attribute::ImmArg)) {
4683       Ops.push_back(getValue(Arg));
4684       continue;
4685     }
4686 
4687     // Use TargetConstant instead of a regular constant for immarg.
4688     EVT VT = TLI.getValueType(*DL, Arg->getType(), true);
4689     if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
4690       assert(CI->getBitWidth() <= 64 &&
4691              "large intrinsic immediates not handled");
4692       Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
4693     } else {
4694       Ops.push_back(
4695           DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
4696     }
4697   }
4698 
4699   SmallVector<EVT, 4> ValueVTs;
4700   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4701 
4702   if (HasChain)
4703     ValueVTs.push_back(MVT::Other);
4704 
4705   SDVTList VTs = DAG.getVTList(ValueVTs);
4706 
4707   // Create the node.
4708   SDValue Result;
4709   if (IsTgtIntrinsic) {
4710     // This is a target intrinsic that touches memory.
4711     AAMDNodes AAInfo;
4712     I.getAAMetadata(AAInfo);
4713     Result =
4714         DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
4715                                 MachinePointerInfo(Info.ptrVal, Info.offset),
4716                                 Info.align, Info.flags, Info.size, AAInfo);
4717   } else if (!HasChain) {
4718     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4719   } else if (!I.getType()->isVoidTy()) {
4720     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4721   } else {
4722     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4723   }
4724 
4725   if (HasChain) {
4726     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4727     if (OnlyLoad)
4728       PendingLoads.push_back(Chain);
4729     else
4730       DAG.setRoot(Chain);
4731   }
4732 
4733   if (!I.getType()->isVoidTy()) {
4734     if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4735       EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4736       Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4737     } else
4738       Result = lowerRangeToAssertZExt(DAG, I, Result);
4739 
4740     setValue(&I, Result);
4741   }
4742 }
4743 
4744 /// GetSignificand - Get the significand and build it into a floating-point
4745 /// number with an exponent of 1:
4746 ///
4747 ///   Op = (Op & 0x007fffff) | 0x3f800000;
4748 ///
4749 /// where Op is the i32 bit pattern of the floating-point value.
4750 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4751   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4752                            DAG.getConstant(0x007fffff, dl, MVT::i32));
4753   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4754                            DAG.getConstant(0x3f800000, dl, MVT::i32));
4755   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4756 }
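// Worked example (illustrative, assuming IEEE-754 single precision): for
// Op = bitcast(3.14159f) = 0x40490FDB,
//
//   (0x40490FDB & 0x007fffff) | 0x3f800000 = 0x3FC90FDB  ~=  1.5708f
//
// i.e. the input's mantissa bits repacked under a biased exponent of 127,
// yielding a value in [1, 2).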
4757 
4758 /// GetExponent - Get the exponent:
4759 ///
4760 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4761 ///
4762 /// where Op is the i32 bit pattern of the floating-point value.
4763 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4764                            const TargetLowering &TLI, const SDLoc &dl) {
4765   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4766                            DAG.getConstant(0x7f800000, dl, MVT::i32));
4767   SDValue t1 = DAG.getNode(
4768       ISD::SRL, dl, MVT::i32, t0,
4769       DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4770   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4771                            DAG.getConstant(127, dl, MVT::i32));
4772   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4773 }
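// Worked example (illustrative, continuing the one above): for
// Op = bitcast(3.14159f) = 0x40490FDB,
//
//   ((0x40490FDB & 0x7f800000) >> 23) - 127 = 128 - 127 = 1  ->  1.0f
//
// Together with GetSignificand this recovers x = significand * 2^exponent
// (1.5708 * 2^1 ~= 3.1416), the decomposition the expandLog* helpers below
// build on.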
4774 
4775 /// getF32Constant - Get 32-bit floating point constant.
4776 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4777                               const SDLoc &dl) {
4778   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4779                            MVT::f32);
4780 }
4781 
4782 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4783                                        SelectionDAG &DAG) {
4784   // TODO: What fast-math-flags should be set on the floating-point nodes?
4785 
4786   //   IntegerPartOfX = (int32_t)t0;
4787   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4788 
4789   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
4790   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4791   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4792 
4793   //   IntegerPartOfX <<= 23;
4794   IntegerPartOfX = DAG.getNode(
4795       ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4796       DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4797                                   DAG.getDataLayout())));
4798 
4799   SDValue TwoToFractionalPartOfX;
4800   if (LimitFloatPrecision <= 6) {
4801     // For floating-point precision of 6:
4802     //
4803     //   TwoToFractionalPartOfX =
4804     //     0.997535578f +
4805     //       (0.735607626f + 0.252464424f * x) * x;
4806     //
4807     // error 0.0144103317, which is 6 bits
4808     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4809                              getF32Constant(DAG, 0x3e814304, dl));
4810     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4811                              getF32Constant(DAG, 0x3f3c50c8, dl));
4812     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4813     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4814                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
4815   } else if (LimitFloatPrecision <= 12) {
4816     // For floating-point precision of 12:
4817     //
4818     //   TwoToFractionalPartOfX =
4819     //     0.999892986f +
4820     //       (0.696457318f +
4821     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4822     //
4823     // error 0.000107046256, which is 13 to 14 bits
4824     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4825                              getF32Constant(DAG, 0x3da235e3, dl));
4826     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4827                              getF32Constant(DAG, 0x3e65b8f3, dl));
4828     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4829     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4830                              getF32Constant(DAG, 0x3f324b07, dl));
4831     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4832     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4833                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
4834   } else { // LimitFloatPrecision <= 18
4835     // For floating-point precision of 18:
4836     //
4837     //   TwoToFractionalPartOfX =
4838     //     0.999999982f +
4839     //       (0.693148872f +
4840     //         (0.240227044f +
4841     //           (0.554906021e-1f +
4842     //             (0.961591928e-2f +
4843     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4844     // error 2.47208000*10^(-7), which is better than 18 bits
4845     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4846                              getF32Constant(DAG, 0x3924b03e, dl));
4847     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4848                              getF32Constant(DAG, 0x3ab24b87, dl));
4849     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4850     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4851                              getF32Constant(DAG, 0x3c1d8c17, dl));
4852     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4853     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4854                              getF32Constant(DAG, 0x3d634a1d, dl));
4855     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4856     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4857                              getF32Constant(DAG, 0x3e75fe14, dl));
4858     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4859     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4860                               getF32Constant(DAG, 0x3f317234, dl));
4861     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4862     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4863                                          getF32Constant(DAG, 0x3f800000, dl));
4864   }
4865 
4866   // Add the exponent into the result in integer domain.
4867   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4868   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4869                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4870 }
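// Worked example (illustrative): for t0 = 3.5f the sequence above computes
//
//   IntegerPartOfX = 3,  X = 0.5f
//   polynomial(X)  ~= 2^0.5 ~= 1.414f    (to the selected precision)
//   bits(1.414f) + (3 << 23)             // bumps the biased exponent by 3
//                  ~= 11.31f ~= 2^3.5
//
// Adding IntegerPartOfX << 23 to the i32 bit pattern multiplies the float by
// 2^IntegerPartOfX, provided the exponent field does not overflow.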
4871 
4872 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
4873 /// limited-precision mode.
4874 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4875                          const TargetLowering &TLI) {
4876   if (Op.getValueType() == MVT::f32 &&
4877       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4878 
4879     // Put the exponent in the right bit position for later addition to the
4880     // final result:
4881     //
4882     // t0 = Op * log2(e)
4883 
4884     // TODO: What fast-math-flags should be set here?
4885     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4886                              DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
4887     return getLimitedPrecisionExp2(t0, dl, DAG);
4888   }
4889 
4890   // No special expansion.
4891   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4892 }
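// Illustrative check: the rewrite above relies on exp(x) = 2^(x * log2(e)),
// so exp(1.0f) is lowered as getLimitedPrecisionExp2(1.0f * 1.442695f)
// ~= 2^1.442695 ~= 2.71828. Outside limited-precision mode the generic
// ISD::FEXP node is emitted and left for the target to lower.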
4893 
4894 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4895 /// limited-precision mode.
4896 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4897                          const TargetLowering &TLI) {
4898   // TODO: What fast-math-flags should be set on the floating-point nodes?
4899 
4900   if (Op.getValueType() == MVT::f32 &&
4901       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4902     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4903 
4904     // Scale the exponent by log(2).
4905     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4906     SDValue LogOfExponent =
4907         DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4908                     DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
4909 
4910     // Get the significand and build it into a floating-point number with
4911     // exponent of 1.
4912     SDValue X = GetSignificand(DAG, Op1, dl);
4913 
4914     SDValue LogOfMantissa;
4915     if (LimitFloatPrecision <= 6) {
4916       // For floating-point precision of 6:
4917       //
4918       //   LogOfMantissa =
4919       //     -1.1609546f +
4920       //       (1.4034025f - 0.23903021f * x) * x;
4921       //
4922       // error 0.0034276066, which is better than 8 bits
4923       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4924                                getF32Constant(DAG, 0xbe74c456, dl));
4925       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4926                                getF32Constant(DAG, 0x3fb3a2b1, dl));
4927       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4928       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4929                                   getF32Constant(DAG, 0x3f949a29, dl));
4930     } else if (LimitFloatPrecision <= 12) {
4931       // For floating-point precision of 12:
4932       //
4933       //   LogOfMantissa =
4934       //     -1.7417939f +
4935       //       (2.8212026f +
4936       //         (-1.4699568f +
4937       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4938       //
4939       // error 0.000061011436, which is 14 bits
4940       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4941                                getF32Constant(DAG, 0xbd67b6d6, dl));
4942       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4943                                getF32Constant(DAG, 0x3ee4f4b8, dl));
4944       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4945       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4946                                getF32Constant(DAG, 0x3fbc278b, dl));
4947       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4948       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4949                                getF32Constant(DAG, 0x40348e95, dl));
4950       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4951       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4952                                   getF32Constant(DAG, 0x3fdef31a, dl));
4953     } else { // LimitFloatPrecision <= 18
4954       // For floating-point precision of 18:
4955       //
4956       //   LogOfMantissa =
4957       //     -2.1072184f +
4958       //       (4.2372794f +
4959       //         (-3.7029485f +
4960       //           (2.2781945f +
4961       //             (-0.87823314f +
4962       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4963       //
4964       // error 0.0000023660568, which is better than 18 bits
4965       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4966                                getF32Constant(DAG, 0xbc91e5ac, dl));
4967       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4968                                getF32Constant(DAG, 0x3e4350aa, dl));
4969       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4970       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4971                                getF32Constant(DAG, 0x3f60d3e3, dl));
4972       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4973       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4974                                getF32Constant(DAG, 0x4011cdf0, dl));
4975       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4976       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4977                                getF32Constant(DAG, 0x406cfd1c, dl));
4978       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4979       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4980                                getF32Constant(DAG, 0x408797cb, dl));
4981       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4982       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4983                                   getF32Constant(DAG, 0x4006dcab, dl));
4984     }
4985 
4986     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4987   }
4988 
4989   // No special expansion.
4990   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4991 }
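// Worked example (illustrative): for Op = 8.0f the expansion above computes
//
//   exponent    = 3     ->  LogOfExponent = 3 * ln(2) ~= 2.0794f
//   significand = 1.0f  ->  LogOfMantissa ~= ln(1.0)  =  0
//   result      ~= 2.0794f == ln(8)
//
// i.e. ln(x) = e * ln(2) + ln(m) for x = m * 2^e, with the minimax polynomial
// supplying ln(m) over [1, 2) to the requested precision.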
4992 
4993 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4994 /// limited-precision mode.
4995 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4996                           const TargetLowering &TLI) {
4997   // TODO: What fast-math-flags should be set on the floating-point nodes?
4998 
4999   if (Op.getValueType() == MVT::f32 &&
5000       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5001     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5002 
5003     // Get the exponent.
5004     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5005 
5006     // Get the significand and build it into a floating-point number with
5007     // exponent of 1.
5008     SDValue X = GetSignificand(DAG, Op1, dl);
5009 
5010     // Different possible minimax approximations of significand in
5011     // floating-point for various degrees of accuracy over [1,2].
5012     SDValue Log2ofMantissa;
5013     if (LimitFloatPrecision <= 6) {
5014       // For floating-point precision of 6:
5015       //
5016       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5017       //
5018       // error 0.0049451742, which is more than 7 bits
5019       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5020                                getF32Constant(DAG, 0xbeb08fe0, dl));
5021       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5022                                getF32Constant(DAG, 0x40019463, dl));
5023       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5024       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5025                                    getF32Constant(DAG, 0x3fd6633d, dl));
5026     } else if (LimitFloatPrecision <= 12) {
5027       // For floating-point precision of 12:
5028       //
5029       //   Log2ofMantissa =
5030       //     -2.51285454f +
5031       //       (4.07009056f +
5032       //         (-2.12067489f +
5033       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5034       //
5035       // error 0.0000876136000, which is better than 13 bits
5036       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5037                                getF32Constant(DAG, 0xbda7262e, dl));
5038       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5039                                getF32Constant(DAG, 0x3f25280b, dl));
5040       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5041       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5042                                getF32Constant(DAG, 0x4007b923, dl));
5043       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5044       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5045                                getF32Constant(DAG, 0x40823e2f, dl));
5046       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5047       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5048                                    getF32Constant(DAG, 0x4020d29c, dl));
5049     } else { // LimitFloatPrecision <= 18
5050       // For floating-point precision of 18:
5051       //
5052       //   Log2ofMantissa =
5053       //     -3.0400495f +
5054       //       (6.1129976f +
5055       //         (-5.3420409f +
5056       //           (3.2865683f +
5057       //             (-1.2669343f +
5058       //               (0.27515199f -
5059       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5060       //
5061       // error 0.0000018516, which is better than 18 bits
5062       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5063                                getF32Constant(DAG, 0xbcd2769e, dl));
5064       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5065                                getF32Constant(DAG, 0x3e8ce0b9, dl));
5066       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5067       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5068                                getF32Constant(DAG, 0x3fa22ae7, dl));
5069       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5070       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5071                                getF32Constant(DAG, 0x40525723, dl));
5072       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5073       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5074                                getF32Constant(DAG, 0x40aaf200, dl));
5075       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5076       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5077                                getF32Constant(DAG, 0x40c39dad, dl));
5078       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5079       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5080                                    getF32Constant(DAG, 0x4042902c, dl));
5081     }
5082 
5083     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5084   }
5085 
5086   // No special expansion.
5087   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
5088 }
5089 
5090 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5091 /// limited-precision mode.
5092 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5093                            const TargetLowering &TLI) {
5094   // TODO: What fast-math-flags should be set on the floating-point nodes?
5095 
5096   if (Op.getValueType() == MVT::f32 &&
5097       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5098     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5099 
5100     // Scale the exponent by log10(2) [0.30102999f].
5101     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5102     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5103                                         getF32Constant(DAG, 0x3e9a209a, dl));
5104 
5105     // Get the significand and build it into a floating-point number with
5106     // exponent of 1.
5107     SDValue X = GetSignificand(DAG, Op1, dl);
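         // At this point Op == 2^Exp * X with X in [1, 2), so
         //   log10(Op) = Exp * log10(2) + log10(X),
         // and log10(X) is approximated below with a minimax polynomial in X.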
5108 
5109     SDValue Log10ofMantissa;
5110     if (LimitFloatPrecision <= 6) {
5111       // For floating-point precision of 6:
5112       //
5113       //   Log10ofMantissa =
5114       //     -0.50419619f +
5115       //       (0.60948995f - 0.10380950f * x) * x;
5116       //
5117       // error 0.0014886165, which is 6 bits
5118       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5119                                getF32Constant(DAG, 0xbdd49a13, dl));
5120       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5121                                getF32Constant(DAG, 0x3f1c0789, dl));
5122       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5123       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5124                                     getF32Constant(DAG, 0x3f011300, dl));
5125     } else if (LimitFloatPrecision <= 12) {
5126       // For floating-point precision of 12:
5127       //
5128       //   Log10ofMantissa =
5129       //     -0.64831180f +
5130       //       (0.91751397f +
5131       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5132       //
5133       // error 0.00019228036, which is better than 12 bits
5134       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5135                                getF32Constant(DAG, 0x3d431f31, dl));
5136       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5137                                getF32Constant(DAG, 0x3ea21fb2, dl));
5138       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5139       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5140                                getF32Constant(DAG, 0x3f6ae232, dl));
5141       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5142       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5143                                     getF32Constant(DAG, 0x3f25f7c3, dl));
5144     } else { // LimitFloatPrecision <= 18
5145       // For floating-point precision of 18:
5146       //
5147       //   Log10ofMantissa =
5148       //     -0.84299375f +
5149       //       (1.5327582f +
5150       //         (-1.0688956f +
5151       //           (0.49102474f +
5152       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5153       //
5154       // error 0.0000037995730, which is better than 18 bits
5155       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5156                                getF32Constant(DAG, 0x3c5d51ce, dl));
5157       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5158                                getF32Constant(DAG, 0x3e00685a, dl));
5159       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5160       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5161                                getF32Constant(DAG, 0x3efb6798, dl));
5162       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5163       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5164                                getF32Constant(DAG, 0x3f88d192, dl));
5165       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5166       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5167                                getF32Constant(DAG, 0x3fc4316c, dl));
5168       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5169       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5170                                     getF32Constant(DAG, 0x3f57ce70, dl));
5171     }
5172 
5173     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5174   }
5175 
5176   // No special expansion.
5177   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
5178 }
5179 
5180 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5181 /// limited-precision mode.
5182 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5183                           const TargetLowering &TLI) {
5184   if (Op.getValueType() == MVT::f32 &&
5185       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5186     return getLimitedPrecisionExp2(Op, dl, DAG);
5187 
5188   // No special expansion.
5189   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
5190 }
5191 
5192 /// expandPow - Lower a pow intrinsic. Handles the special sequence for
5193 /// limited-precision mode when the base is exactly 10.0f.
5194 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5195                          SelectionDAG &DAG, const TargetLowering &TLI) {
5196   bool IsExp10 = false;
5197   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5198       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5199     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5200       APFloat Ten(10.0f);
5201       IsExp10 = LHSC->isExactlyValue(Ten);
5202     }
5203   }
5204 
5205   // TODO: What fast-math-flags should be set on the FMUL node?
5206   if (IsExp10) {
5207     // pow(10.0f, RHS) == exp2(RHS * log2(10)), so scale the exponent by
5208     // log2(10) and reuse the limited-precision exp2 expansion:
5209     //
5210     //   #define LOG2OF10 3.3219281f
5211     //   t0 = RHS * LOG2OF10;
5212     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5213                              getF32Constant(DAG, 0x40549a78, dl));
5214     return getLimitedPrecisionExp2(t0, dl, DAG);
5215   }
5216 
5217   // No special expansion.
5218   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
5219 }
5220 
5221 /// ExpandPowI - Expand a llvm.powi intrinsic.
5222 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5223                           SelectionDAG &DAG) {
5224   // If RHS is a constant, we can expand this out to a multiplication tree,
5225   // otherwise we end up lowering to a call to __powidf2 (for example).  When
5226   // optimizing for size, we only want to do this if the expansion would produce
5227   // a small number of multiplies, otherwise we do the full expansion.
5228   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5229     // Get the exponent as a positive value.
5230     unsigned Val = RHSC->getSExtValue();
5231     if ((int)Val < 0) Val = -Val;
5232 
5233     // powi(x, 0) -> 1.0
5234     if (Val == 0)
5235       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5236 
5237     bool OptForSize = DAG.shouldOptForSize();
5238     if (!OptForSize ||
5239         // If optimizing for size, don't insert too many multiplies.
5240         // This inserts up to 5 multiplies.
5241         countPopulation(Val) + Log2_32(Val) < 7) {
5242       // We use the simple binary decomposition method to generate the multiply
5243       // sequence.  There are more optimal ways to do this (for example,
5244       // powi(x,15) generates one more multiply than it should), but this has
5245       // the benefit of being both really simple and much better than a libcall.
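           //
           // For example, powi(x, 6) (Val == 0b110) runs the loop as:
           //   bit 0 (clear): CurSquare = x^2
           //   bit 1 (set):   Res = x^2,             CurSquare = x^4
           //   bit 2 (set):   Res = x^2 * x^4 = x^6, CurSquare = x^8 (unused)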
5246       SDValue Res;  // Logically starts equal to 1.0
5247       SDValue CurSquare = LHS;
5248       // TODO: Intrinsics should have fast-math-flags that propagate to these
5249       // nodes.
5250       while (Val) {
5251         if (Val & 1) {
5252           if (Res.getNode())
5253             Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5254           else
5255             Res = CurSquare;  // 1.0*CurSquare.
5256         }
5257 
5258         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5259                                 CurSquare, CurSquare);
5260         Val >>= 1;
5261       }
5262 
5263       // If the original exponent was negative, invert the result (e.g. 1/(x*x*x)).
5264       if (RHSC->getSExtValue() < 0)
5265         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5266                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5267       return Res;
5268     }
5269   }
5270 
5271   // Otherwise, expand to a libcall.
5272   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5273 }
5274 
5275 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5276                             SDValue LHS, SDValue RHS, SDValue Scale,
5277                             SelectionDAG &DAG, const TargetLowering &TLI) {
5278   EVT VT = LHS.getValueType();
5279   bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5280   bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5281   LLVMContext &Ctx = *DAG.getContext();
5282 
5283   // If the type is legal but the operation isn't, this node might survive all
5284   // the way to operation legalization. If we end up there and we do not have
5285   // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5286   // node.
5287 
5288   // Coax the legalizer into expanding the node during type legalization instead
5289   // by bumping the size by one bit. This will force it to Promote, enabling the
5290   // early expansion and avoiding the need to expand later.
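       // For example, an i32 node is rebuilt below with an i33 result; i33 is
       // not a legal type, so the type legalizer promotes it and, as described
       // above, the operation gets expanded at that point.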
5291 
5292   // We don't have to do this if Scale is 0; that can always be expanded, unless
5293   // it's a saturating signed operation. Those can experience true integer
5294   // division overflow, a case which we must avoid.
5295 
5296   // FIXME: We wouldn't have to do this (or any of the early
5297   // expansion/promotion) if it was possible to expand a libcall of an
5298   // illegal type during operation legalization. But it's not, so things
5299   // get a bit hacky.
5300   unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
5301   if ((ScaleInt > 0 || (Saturating && Signed)) &&
5302       (TLI.isTypeLegal(VT) ||
5303        (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5304     TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5305         Opcode, VT, ScaleInt);
5306     if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5307       EVT PromVT;
5308       if (VT.isScalarInteger())
5309         PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5310       else if (VT.isVector()) {
5311         PromVT = VT.getVectorElementType();
5312         PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5313         PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5314       } else
5315         llvm_unreachable("Wrong VT for DIVFIX?");
5316       if (Signed) {
5317         LHS = DAG.getSExtOrTrunc(LHS, DL, PromVT);
5318         RHS = DAG.getSExtOrTrunc(RHS, DL, PromVT);
5319       } else {
5320         LHS = DAG.getZExtOrTrunc(LHS, DL, PromVT);
5321         RHS = DAG.getZExtOrTrunc(RHS, DL, PromVT);
5322       }
5323       EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5324       // For saturating operations, we need to shift up the LHS to get the
5325       // proper saturation width, and then shift down again afterwards.
5326       if (Saturating)
5327         LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5328                           DAG.getConstant(1, DL, ShiftTy));
5329       SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5330       if (Saturating)
5331         Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5332                           DAG.getConstant(1, DL, ShiftTy));
5333       return DAG.getZExtOrTrunc(Res, DL, VT);
5334     }
5335   }
5336 
5337   return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5338 }
5339 
5340 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5341 // bitcasted, or split argument. Returns a list of <Register, size in bits> pairs.
5342 static void
5343 getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
5344                      const SDValue &N) {
5345   switch (N.getOpcode()) {
5346   case ISD::CopyFromReg: {
5347     SDValue Op = N.getOperand(1);
5348     Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5349                       Op.getValueType().getSizeInBits());
5350     return;
5351   }
5352   case ISD::BITCAST:
5353   case ISD::AssertZext:
5354   case ISD::AssertSext:
5355   case ISD::TRUNCATE:
5356     getUnderlyingArgRegs(Regs, N.getOperand(0));
5357     return;
5358   case ISD::BUILD_PAIR:
5359   case ISD::BUILD_VECTOR:
5360   case ISD::CONCAT_VECTORS:
5361     for (SDValue Op : N->op_values())
5362       getUnderlyingArgRegs(Regs, Op);
5363     return;
5364   default:
5365     return;
5366   }
5367 }
5368 
5369 /// If the DbgValueInst is a dbg_value of a function argument, create the
5370 /// corresponding DBG_VALUE machine instruction for it now.  At the end of
5371 /// instruction selection, they will be inserted into the entry BB.
5372 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5373     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5374     DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
5375   const Argument *Arg = dyn_cast<Argument>(V);
5376   if (!Arg)
5377     return false;
5378 
5379   if (!IsDbgDeclare) {
5380     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5381     // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5382     // the entry block.
5383     bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5384     if (!IsInEntryBlock)
5385       return false;
5386 
5387     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5388     // should also only emit as ArgDbgValue if the dbg.value intrinsic
5389     // describes a variable that is a parameter of the current function.
5390     //
5391     // However, if we are already at the top of the entry block, we can still
5392     // emit using ArgDbgValue even for a variable that is not a parameter.
5393     // This catches situations where the dbg.value refers to an argument that
5394     // isn't used in the entry block, so any CopyToReg node would be optimized
5395     // out and the only way to express this DBG_VALUE is by using the physical
5396     // register (or frame index) as done in this method. Outside the prologue
5397     // we keep the stricter requirement: the variable must be a parameter of
5398     // the current function and the dbg.value must be found in the entry
5399     // block.
5400     bool VariableIsFunctionInputArg = Variable->isParameter() &&
5401         !DL->getInlinedAt();
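         // SDNodeOrder is still at its lowest value only if no other
         // instruction has been visited yet, i.e. this dbg.value appears at
         // the very start of the entry block (the function prologue).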
5402     bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5403     if (!IsInPrologue && !VariableIsFunctionInputArg)
5404       return false;
5405 
5406     // Here we assume that a function argument at the IR level can only be used
5407     // to describe one input parameter at the source level. If we, for example,
5408     // have source code like this
5409     //
5410     //    struct A { long x, y; };
5411     //    void foo(struct A a, long b) {
5412     //      ...
5413     //      b = a.x;
5414     //      ...
5415     //    }
5416     //
5417     // and IR like this
5418     //
5419     //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
5420     //  entry:
5421     //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5422     //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5423     //    call void @llvm.dbg.value(metadata i32 %b, "b",
5424     //    ...
5425     //    call void @llvm.dbg.value(metadata i32 %a1, "b"
5426     //    ...
5427     //
5428     // then the last dbg.value is describing a parameter "b" using a value that
5429     // is an argument. But since we have already used %a1 to describe a
5430     // parameter, we should not handle that last dbg.value here (that would
5431     // result in an incorrect hoisting of the DBG_VALUE to the function entry).
5432     // Notice that we allow one dbg.value per IR-level argument, to accommodate
5433     // the situation with fragments above.
5434     if (VariableIsFunctionInputArg) {
5435       unsigned ArgNo = Arg->getArgNo();
5436       if (ArgNo >= FuncInfo.DescribedArgs.size())
5437         FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5438       else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5439         return false;
5440       FuncInfo.DescribedArgs.set(ArgNo);
5441     }
5442   }
5443 
5444   MachineFunction &MF = DAG.getMachineFunction();
5445   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5446 
5447   bool IsIndirect = false;
5448   Optional<MachineOperand> Op;
5449   // Some arguments' frame index is recorded during argument lowering.
5450   int FI = FuncInfo.getArgumentFrameIndex(Arg);
5451   if (FI != std::numeric_limits<int>::max())
5452     Op = MachineOperand::CreateFI(FI);
5453 
5454   SmallVector<std::pair<unsigned, unsigned>, 8> ArgRegsAndSizes;
5455   if (!Op && N.getNode()) {
5456     getUnderlyingArgRegs(ArgRegsAndSizes, N);
5457     Register Reg;
5458     if (ArgRegsAndSizes.size() == 1)
5459       Reg = ArgRegsAndSizes.front().first;
5460 
5461     if (Reg && Reg.isVirtual()) {
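           // If this virtual register is the live-in copy of a physical
           // (argument) register, describe the argument with that physical
           // register instead.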
5462       MachineRegisterInfo &RegInfo = MF.getRegInfo();
5463       Register PR = RegInfo.getLiveInPhysReg(Reg);
5464       if (PR)
5465         Reg = PR;
5466     }
5467     if (Reg) {
5468       Op = MachineOperand::CreateReg(Reg, false);
5469       IsIndirect = IsDbgDeclare;
5470     }
5471   }
5472 
5473   if (!Op && N.getNode()) {
5474     // Check if frame index is available.
5475     SDValue LCandidate = peekThroughBitcasts(N);
5476     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5477       if (FrameIndexSDNode *FINode =
5478           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5479         Op = MachineOperand::CreateFI(FINode->getIndex());
5480   }
5481 
5482   if (!Op) {
5483     // Create a DBG_VALUE for each of the registers that make up the value.
5484     auto splitMultiRegDbgValue
5485       = [&](ArrayRef<std::pair<unsigned, unsigned>> SplitRegs) {
5486       unsigned Offset = 0;
5487       for (auto RegAndSize : SplitRegs) {
5488         // If the expression is already a fragment, the current register
5489         // offset+size might extend beyond the fragment. In this case, only
5490         // the register bits that are inside the fragment are relevant.
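             //
             // For example, if the value lives in two 32-bit registers but the
             // expression fragment only covers 48 bits, the first register
             // contributes all 32 bits, the second only its low 16 bits, and
             // any further register would be skipped entirely.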
5491         int RegFragmentSizeInBits = RegAndSize.second;
5492         if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5493           uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5494           // The register is entirely outside the expression fragment,
5495           // so is irrelevant for debug info.
5496           if (Offset >= ExprFragmentSizeInBits)
5497             break;
5498           // The register is partially outside the expression fragment, only
5499           // the low bits within the fragment are relevant for debug info.
5500           if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
5501             RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
5502           }
5503         }
5504 
5505         auto FragmentExpr = DIExpression::createFragmentExpression(
5506             Expr, Offset, RegFragmentSizeInBits);
5507         Offset += RegAndSize.second;
5508         // If a valid fragment expression cannot be created, the variable's
5509         // correct value cannot be determined and so it is set as Undef.
5510         if (!FragmentExpr) {
5511           SDDbgValue *SDV = DAG.getConstantDbgValue(
5512               Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
5513           DAG.AddDbgValue(SDV, nullptr, false);
5514           continue;
5515         }
5516         assert(!IsDbgDeclare && "DbgDeclare operand is not in memory?");
5517         FuncInfo.ArgDbgValues.push_back(
5518           BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
5519                   RegAndSize.first, Variable, *FragmentExpr));
5520       }
5521     };
5522 
5523     // Check if ValueMap has reg number.
5524     DenseMap<const Value *, Register>::const_iterator
5525       VMI = FuncInfo.ValueMap.find(V);
5526     if (VMI != FuncInfo.ValueMap.end()) {
5527       const auto &TLI = DAG.getTargetLoweringInfo();
5528       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5529                        V->getType(), getABIRegCopyCC(V));
5530       if (RFV.occupiesMultipleRegs()) {
5531         splitMultiRegDbgValue(RFV.getRegsAndSizes());
5532         return true;
5533       }
5534 
5535       Op = MachineOperand::CreateReg(VMI->second, false);
5536       IsIndirect = IsDbgDeclare;
5537     } else if (ArgRegsAndSizes.size() > 1) {
5538       // This was split due to the calling convention, and no virtual register
5539       // mapping exists for the value.
5540       splitMultiRegDbgValue(ArgRegsAndSizes);
5541       return true;
5542     }
5543   }
5544 
5545   if (!Op)
5546     return false;
5547 
5548   assert(Variable->isValidLocationForIntrinsic(DL) &&
5549          "Expected inlined-at fields to agree");
5550   IsIndirect = (Op->isReg()) ? IsIndirect : true;
5551   FuncInfo.ArgDbgValues.push_back(
5552       BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
5553               *Op, Variable, Expr));
5554 
5555   return true;
5556 }
5557 
5558 /// Return the appropriate SDDbgValue based on N.
5559 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5560                                              DILocalVariable *Variable,
5561                                              DIExpression *Expr,
5562                                              const DebugLoc &dl,
5563                                              unsigned DbgSDNodeOrder) {
5564   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5565     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5566     // stack slot locations.
5567     //
5568     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5569     // debug values here after optimization:
5570     //
5571     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
5572     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5573     //
5574     // Both describe the direct values of their associated variables.
5575     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5576                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5577   }
5578   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5579                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5580 }
5581 
5582 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5583   switch (Intrinsic) {
5584   case Intrinsic::smul_fix:
5585     return ISD::SMULFIX;
5586   case Intrinsic::umul_fix:
5587     return ISD::UMULFIX;
5588   case Intrinsic::smul_fix_sat:
5589     return ISD::SMULFIXSAT;
5590   case Intrinsic::umul_fix_sat:
5591     return ISD::UMULFIXSAT;
5592   case Intrinsic::sdiv_fix:
5593     return ISD::SDIVFIX;
5594   case Intrinsic::udiv_fix:
5595     return ISD::UDIVFIX;
5596   case Intrinsic::sdiv_fix_sat:
5597     return ISD::SDIVFIXSAT;
5598   case Intrinsic::udiv_fix_sat:
5599     return ISD::UDIVFIXSAT;
5600   default:
5601     llvm_unreachable("Unhandled fixed point intrinsic");
5602   }
5603 }
5604 
5605 void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
5606                                            const char *FunctionName) {
5607   assert(FunctionName && "FunctionName must not be nullptr");
5608   SDValue Callee = DAG.getExternalSymbol(
5609       FunctionName,
5610       DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
5611   LowerCallTo(I, Callee, I.isTailCall());
5612 }
5613 
5614 /// Given a @llvm.call.preallocated.setup, return the corresponding
5615 /// preallocated call.
5616 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
5617   assert(cast<CallBase>(PreallocatedSetup)
5618                  ->getCalledFunction()
5619                  ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
5620          "expected call_preallocated_setup Value");
5621   for (auto *U : PreallocatedSetup->users()) {
5622     auto *UseCall = cast<CallBase>(U);
5623     const Function *Fn = UseCall->getCalledFunction();
5624     if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
5625       return UseCall;
5626     }
5627   }
5628   llvm_unreachable("expected corresponding call to preallocated setup/arg");
5629 }
5630 
5631 /// Lower the call to the specified intrinsic function.
5632 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
5633                                              unsigned Intrinsic) {
5634   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5635   SDLoc sdl = getCurSDLoc();
5636   DebugLoc dl = getCurDebugLoc();
5637   SDValue Res;
5638 
5639   switch (Intrinsic) {
5640   default:
5641     // By default, turn this into a target intrinsic node.
5642     visitTargetIntrinsic(I, Intrinsic);
5643     return;
5644   case Intrinsic::vscale: {
5645     match(&I, m_VScale(DAG.getDataLayout()));
5646     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5647     setValue(&I,
5648              DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1)));
5649     return;
5650   }
5651   case Intrinsic::vastart:  visitVAStart(I); return;
5652   case Intrinsic::vaend:    visitVAEnd(I); return;
5653   case Intrinsic::vacopy:   visitVACopy(I); return;
5654   case Intrinsic::returnaddress:
5655     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5656                              TLI.getPointerTy(DAG.getDataLayout()),
5657                              getValue(I.getArgOperand(0))));
5658     return;
5659   case Intrinsic::addressofreturnaddress:
5660     setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5661                              TLI.getPointerTy(DAG.getDataLayout())));
5662     return;
5663   case Intrinsic::sponentry:
5664     setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl,
5665                              TLI.getFrameIndexTy(DAG.getDataLayout())));
5666     return;
5667   case Intrinsic::frameaddress:
5668     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5669                              TLI.getFrameIndexTy(DAG.getDataLayout()),
5670                              getValue(I.getArgOperand(0))));
5671     return;
5672   case Intrinsic::read_register: {
5673     Value *Reg = I.getArgOperand(0);
5674     SDValue Chain = getRoot();
5675     SDValue RegName =
5676         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5677     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5678     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5679       DAG.getVTList(VT, MVT::Other), Chain, RegName);
5680     setValue(&I, Res);
5681     DAG.setRoot(Res.getValue(1));
5682     return;
5683   }
5684   case Intrinsic::write_register: {
5685     Value *Reg = I.getArgOperand(0);
5686     Value *RegValue = I.getArgOperand(1);
5687     SDValue Chain = getRoot();
5688     SDValue RegName =
5689         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5690     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
5691                             RegName, getValue(RegValue)));
5692     return;
5693   }
5694   case Intrinsic::memcpy: {
5695     const auto &MCI = cast<MemCpyInst>(I);
5696     SDValue Op1 = getValue(I.getArgOperand(0));
5697     SDValue Op2 = getValue(I.getArgOperand(1));
5698     SDValue Op3 = getValue(I.getArgOperand(2));
5699     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5700     Align DstAlign = MCI.getDestAlign().valueOrOne();
5701     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5702     Align Alignment = commonAlignment(DstAlign, SrcAlign);
5703     bool isVol = MCI.isVolatile();
5704     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5705     // FIXME: Support passing different dest/src alignments to the memcpy DAG
5706     // node.
5707     SDValue Root = isVol ? getRoot() : getMemoryRoot();
5708     SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
5709                                /* AlwaysInline */ false, isTC,
5710                                MachinePointerInfo(I.getArgOperand(0)),
5711                                MachinePointerInfo(I.getArgOperand(1)));
5712     updateDAGForMaybeTailCall(MC);
5713     return;
5714   }
5715   case Intrinsic::memcpy_inline: {
5716     const auto &MCI = cast<MemCpyInlineInst>(I);
5717     SDValue Dst = getValue(I.getArgOperand(0));
5718     SDValue Src = getValue(I.getArgOperand(1));
5719     SDValue Size = getValue(I.getArgOperand(2));
5720     assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
5721     // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
5722     Align DstAlign = MCI.getDestAlign().valueOrOne();
5723     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5724     Align Alignment = commonAlignment(DstAlign, SrcAlign);
5725     bool isVol = MCI.isVolatile();
5726     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5727     // FIXME: Support passing different dest/src alignments to the memcpy DAG
5728     // node.
5729     SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
5730                                /* AlwaysInline */ true, isTC,
5731                                MachinePointerInfo(I.getArgOperand(0)),
5732                                MachinePointerInfo(I.getArgOperand(1)));
5733     updateDAGForMaybeTailCall(MC);
5734     return;
5735   }
5736   case Intrinsic::memset: {
5737     const auto &MSI = cast<MemSetInst>(I);
5738     SDValue Op1 = getValue(I.getArgOperand(0));
5739     SDValue Op2 = getValue(I.getArgOperand(1));
5740     SDValue Op3 = getValue(I.getArgOperand(2));
5741     // @llvm.memset defines 0 and 1 to both mean no alignment.
5742     Align Alignment = MSI.getDestAlign().valueOrOne();
5743     bool isVol = MSI.isVolatile();
5744     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5745     SDValue Root = isVol ? getRoot() : getMemoryRoot();
5746     SDValue MS = DAG.getMemset(Root, sdl, Op1, Op2, Op3, Alignment, isVol, isTC,
5747                                MachinePointerInfo(I.getArgOperand(0)));
5748     updateDAGForMaybeTailCall(MS);
5749     return;
5750   }
5751   case Intrinsic::memmove: {
5752     const auto &MMI = cast<MemMoveInst>(I);
5753     SDValue Op1 = getValue(I.getArgOperand(0));
5754     SDValue Op2 = getValue(I.getArgOperand(1));
5755     SDValue Op3 = getValue(I.getArgOperand(2));
5756     // @llvm.memmove defines 0 and 1 to both mean no alignment.
5757     Align DstAlign = MMI.getDestAlign().valueOrOne();
5758     Align SrcAlign = MMI.getSourceAlign().valueOrOne();
5759     Align Alignment = commonAlignment(DstAlign, SrcAlign);
5760     bool isVol = MMI.isVolatile();
5761     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5762     // FIXME: Support passing different dest/src alignments to the memmove DAG
5763     // node.
5764     SDValue Root = isVol ? getRoot() : getMemoryRoot();
5765     SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
5766                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
5767                                 MachinePointerInfo(I.getArgOperand(1)));
5768     updateDAGForMaybeTailCall(MM);
5769     return;
5770   }
5771   case Intrinsic::memcpy_element_unordered_atomic: {
5772     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
5773     SDValue Dst = getValue(MI.getRawDest());
5774     SDValue Src = getValue(MI.getRawSource());
5775     SDValue Length = getValue(MI.getLength());
5776 
5777     unsigned DstAlign = MI.getDestAlignment();
5778     unsigned SrcAlign = MI.getSourceAlignment();
5779     Type *LengthTy = MI.getLength()->getType();
5780     unsigned ElemSz = MI.getElementSizeInBytes();
5781     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5782     SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
5783                                      SrcAlign, Length, LengthTy, ElemSz, isTC,
5784                                      MachinePointerInfo(MI.getRawDest()),
5785                                      MachinePointerInfo(MI.getRawSource()));
5786     updateDAGForMaybeTailCall(MC);
5787     return;
5788   }
5789   case Intrinsic::memmove_element_unordered_atomic: {
5790     auto &MI = cast<AtomicMemMoveInst>(I);
5791     SDValue Dst = getValue(MI.getRawDest());
5792     SDValue Src = getValue(MI.getRawSource());
5793     SDValue Length = getValue(MI.getLength());
5794 
5795     unsigned DstAlign = MI.getDestAlignment();
5796     unsigned SrcAlign = MI.getSourceAlignment();
5797     Type *LengthTy = MI.getLength()->getType();
5798     unsigned ElemSz = MI.getElementSizeInBytes();
5799     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5800     SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
5801                                       SrcAlign, Length, LengthTy, ElemSz, isTC,
5802                                       MachinePointerInfo(MI.getRawDest()),
5803                                       MachinePointerInfo(MI.getRawSource()));
5804     updateDAGForMaybeTailCall(MC);
5805     return;
5806   }
5807   case Intrinsic::memset_element_unordered_atomic: {
5808     auto &MI = cast<AtomicMemSetInst>(I);
5809     SDValue Dst = getValue(MI.getRawDest());
5810     SDValue Val = getValue(MI.getValue());
5811     SDValue Length = getValue(MI.getLength());
5812 
5813     unsigned DstAlign = MI.getDestAlignment();
5814     Type *LengthTy = MI.getLength()->getType();
5815     unsigned ElemSz = MI.getElementSizeInBytes();
5816     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5817     SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
5818                                      LengthTy, ElemSz, isTC,
5819                                      MachinePointerInfo(MI.getRawDest()));
5820     updateDAGForMaybeTailCall(MC);
5821     return;
5822   }
5823   case Intrinsic::call_preallocated_setup: {
5824     const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
5825     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
5826     SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
5827                               getRoot(), SrcValue);
5828     setValue(&I, Res);
5829     DAG.setRoot(Res);
5830     return;
5831   }
5832   case Intrinsic::call_preallocated_arg: {
5833     const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
5834     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
5835     SDValue Ops[3];
5836     Ops[0] = getRoot();
5837     Ops[1] = SrcValue;
5838     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
5839                                    MVT::i32); // arg index
5840     SDValue Res = DAG.getNode(
5841         ISD::PREALLOCATED_ARG, sdl,
5842         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
5843     setValue(&I, Res);
5844     DAG.setRoot(Res.getValue(1));
5845     return;
5846   }
5847   case Intrinsic::dbg_addr:
5848   case Intrinsic::dbg_declare: {
5849     const auto &DI = cast<DbgVariableIntrinsic>(I);
5850     DILocalVariable *Variable = DI.getVariable();
5851     DIExpression *Expression = DI.getExpression();
5852     dropDanglingDebugInfo(Variable, Expression);
5853     assert(Variable && "Missing variable");
5854     LLVM_DEBUG(dbgs() << "SelectionDAG visiting debug intrinsic: " << DI
5855                       << "\n");
5856     // Check if address has undef value.
5857     const Value *Address = DI.getVariableLocation();
5858     if (!Address || isa<UndefValue>(Address) ||
5859         (Address->use_empty() && !isa<Argument>(Address))) {
5860       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
5861                         << " (bad/undef/unused-arg address)\n");
5862       return;
5863     }
5864 
5865     bool isParameter = Variable->isParameter() || isa<Argument>(Address);
5866 
5867     // Check if this variable can be described by a frame index, typically
5868     // either as a static alloca or a byval parameter.
5869     int FI = std::numeric_limits<int>::max();
5870     if (const auto *AI =
5871             dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
5872       if (AI->isStaticAlloca()) {
5873         auto I = FuncInfo.StaticAllocaMap.find(AI);
5874         if (I != FuncInfo.StaticAllocaMap.end())
5875           FI = I->second;
5876       }
5877     } else if (const auto *Arg = dyn_cast<Argument>(
5878                    Address->stripInBoundsConstantOffsets())) {
5879       FI = FuncInfo.getArgumentFrameIndex(Arg);
5880     }
5881 
5882     // llvm.dbg.addr is control dependent and always generates indirect
5883     // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
5884     // the MachineFunction variable table.
5885     if (FI != std::numeric_limits<int>::max()) {
5886       if (Intrinsic == Intrinsic::dbg_addr) {
5887         SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
5888             Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
5889         DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
5890       } else {
5891         LLVM_DEBUG(dbgs() << "Skipping " << DI
5892                           << " (variable info stashed in MF side table)\n");
5893       }
5894       return;
5895     }
5896 
5897     SDValue &N = NodeMap[Address];
5898     if (!N.getNode() && isa<Argument>(Address))
5899       // Check unused arguments map.
5900       N = UnusedArgNodeMap[Address];
5901     SDDbgValue *SDV;
5902     if (N.getNode()) {
5903       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
5904         Address = BCI->getOperand(0);
5905       // Parameters are handled specially.
5906       auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
5907       if (isParameter && FINode) {
5908         // Byval parameter. We have a frame index at this point.
5909         SDV =
5910             DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
5911                                       /*IsIndirect*/ true, dl, SDNodeOrder);
5912       } else if (isa<Argument>(Address)) {
5913         // Address is an argument, so try to emit its dbg value using
5914         // virtual register info from the FuncInfo.ValueMap.
5915         EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
5916         return;
5917       } else {
5918         SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
5919                               true, dl, SDNodeOrder);
5920       }
5921       DAG.AddDbgValue(SDV, N.getNode(), isParameter);
5922     } else {
5923       // If Address is an argument then try to emit its dbg value using
5924       // virtual register info from the FuncInfo.ValueMap.
5925       if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
5926                                     N)) {
5927         LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
5928                           << " (could not emit func-arg dbg_value)\n");
5929       }
5930     }
5931     return;
5932   }
5933   case Intrinsic::dbg_label: {
5934     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
5935     DILabel *Label = DI.getLabel();
5936     assert(Label && "Missing label");
5937 
5938     SDDbgLabel *SDV;
5939     SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
5940     DAG.AddDbgLabel(SDV);
5941     return;
5942   }
5943   case Intrinsic::dbg_value: {
5944     const DbgValueInst &DI = cast<DbgValueInst>(I);
5945     assert(DI.getVariable() && "Missing variable");
5946 
5947     DILocalVariable *Variable = DI.getVariable();
5948     DIExpression *Expression = DI.getExpression();
5949     dropDanglingDebugInfo(Variable, Expression);
5950     const Value *V = DI.getValue();
5951     if (!V)
5952       return;
5953 
5954     if (handleDebugValue(V, Variable, Expression, dl, DI.getDebugLoc(),
5955         SDNodeOrder))
5956       return;
5957 
5958     // TODO: Dangling debug info will eventually either be resolved or produce
5959     // an Undef DBG_VALUE. However in the resolution case, a gap may appear
5960     // between the original dbg.value location and its resolved DBG_VALUE, which
5961     // we should ideally fill with an extra Undef DBG_VALUE.
5962 
5963     DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
5964     return;
5965   }
5966 
5967   case Intrinsic::eh_typeid_for: {
5968     // Find the type id for the given typeinfo.
5969     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5970     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5971     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5972     setValue(&I, Res);
5973     return;
5974   }
5975 
5976   case Intrinsic::eh_return_i32:
5977   case Intrinsic::eh_return_i64:
5978     DAG.getMachineFunction().setCallsEHReturn(true);
5979     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5980                             MVT::Other,
5981                             getControlRoot(),
5982                             getValue(I.getArgOperand(0)),
5983                             getValue(I.getArgOperand(1))));
5984     return;
5985   case Intrinsic::eh_unwind_init:
5986     DAG.getMachineFunction().setCallsUnwindInit(true);
5987     return;
5988   case Intrinsic::eh_dwarf_cfa:
5989     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
5990                              TLI.getPointerTy(DAG.getDataLayout()),
5991                              getValue(I.getArgOperand(0))));
5992     return;
5993   case Intrinsic::eh_sjlj_callsite: {
5994     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5995     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
5996     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
5997     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
5998 
5999     MMI.setCurrentCallSite(CI->getZExtValue());
6000     return;
6001   }
6002   case Intrinsic::eh_sjlj_functioncontext: {
6003     // Get and store the index of the function context.
6004     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6005     AllocaInst *FnCtx =
6006       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6007     int FI = FuncInfo.StaticAllocaMap[FnCtx];
6008     MFI.setFunctionContextIndex(FI);
6009     return;
6010   }
6011   case Intrinsic::eh_sjlj_setjmp: {
6012     SDValue Ops[2];
6013     Ops[0] = getRoot();
6014     Ops[1] = getValue(I.getArgOperand(0));
6015     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6016                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
6017     setValue(&I, Op.getValue(0));
6018     DAG.setRoot(Op.getValue(1));
6019     return;
6020   }
6021   case Intrinsic::eh_sjlj_longjmp:
6022     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6023                             getRoot(), getValue(I.getArgOperand(0))));
6024     return;
6025   case Intrinsic::eh_sjlj_setup_dispatch:
6026     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6027                             getRoot()));
6028     return;
6029   case Intrinsic::masked_gather:
6030     visitMaskedGather(I);
6031     return;
6032   case Intrinsic::masked_load:
6033     visitMaskedLoad(I);
6034     return;
6035   case Intrinsic::masked_scatter:
6036     visitMaskedScatter(I);
6037     return;
6038   case Intrinsic::masked_store:
6039     visitMaskedStore(I);
6040     return;
6041   case Intrinsic::masked_expandload:
6042     visitMaskedLoad(I, true /* IsExpanding */);
6043     return;
6044   case Intrinsic::masked_compressstore:
6045     visitMaskedStore(I, true /* IsCompressing */);
6046     return;
6047   case Intrinsic::powi:
6048     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6049                             getValue(I.getArgOperand(1)), DAG));
6050     return;
6051   case Intrinsic::log:
6052     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6053     return;
6054   case Intrinsic::log2:
6055     setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6056     return;
6057   case Intrinsic::log10:
6058     setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6059     return;
6060   case Intrinsic::exp:
6061     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6062     return;
6063   case Intrinsic::exp2:
6064     setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6065     return;
6066   case Intrinsic::pow:
6067     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6068                            getValue(I.getArgOperand(1)), DAG, TLI));
6069     return;
6070   case Intrinsic::sqrt:
6071   case Intrinsic::fabs:
6072   case Intrinsic::sin:
6073   case Intrinsic::cos:
6074   case Intrinsic::floor:
6075   case Intrinsic::ceil:
6076   case Intrinsic::trunc:
6077   case Intrinsic::rint:
6078   case Intrinsic::nearbyint:
6079   case Intrinsic::round:
6080   case Intrinsic::roundeven:
6081   case Intrinsic::canonicalize: {
6082     unsigned Opcode;
6083     switch (Intrinsic) {
6084     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6085     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
6086     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
6087     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
6088     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
6089     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
6090     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
6091     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
6092     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
6093     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6094     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
6095     case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6096     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6097     }
6098 
6099     setValue(&I, DAG.getNode(Opcode, sdl,
6100                              getValue(I.getArgOperand(0)).getValueType(),
6101                              getValue(I.getArgOperand(0))));
6102     return;
6103   }
6104   case Intrinsic::lround:
6105   case Intrinsic::llround:
6106   case Intrinsic::lrint:
6107   case Intrinsic::llrint: {
6108     unsigned Opcode;
6109     switch (Intrinsic) {
6110     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6111     case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
6112     case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6113     case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
6114     case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
6115     }
6116 
6117     EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6118     setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6119                              getValue(I.getArgOperand(0))));
6120     return;
6121   }
6122   case Intrinsic::minnum:
6123     setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6124                              getValue(I.getArgOperand(0)).getValueType(),
6125                              getValue(I.getArgOperand(0)),
6126                              getValue(I.getArgOperand(1))));
6127     return;
6128   case Intrinsic::maxnum:
6129     setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6130                              getValue(I.getArgOperand(0)).getValueType(),
6131                              getValue(I.getArgOperand(0)),
6132                              getValue(I.getArgOperand(1))));
6133     return;
6134   case Intrinsic::minimum:
6135     setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6136                              getValue(I.getArgOperand(0)).getValueType(),
6137                              getValue(I.getArgOperand(0)),
6138                              getValue(I.getArgOperand(1))));
6139     return;
6140   case Intrinsic::maximum:
6141     setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6142                              getValue(I.getArgOperand(0)).getValueType(),
6143                              getValue(I.getArgOperand(0)),
6144                              getValue(I.getArgOperand(1))));
6145     return;
6146   case Intrinsic::copysign:
6147     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6148                              getValue(I.getArgOperand(0)).getValueType(),
6149                              getValue(I.getArgOperand(0)),
6150                              getValue(I.getArgOperand(1))));
6151     return;
6152   case Intrinsic::fma:
6153     setValue(&I, DAG.getNode(ISD::FMA, sdl,
6154                              getValue(I.getArgOperand(0)).getValueType(),
6155                              getValue(I.getArgOperand(0)),
6156                              getValue(I.getArgOperand(1)),
6157                              getValue(I.getArgOperand(2))));
6158     return;
6159 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
6160   case Intrinsic::INTRINSIC:
6161 #include "llvm/IR/ConstrainedOps.def"
6162     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6163     return;
6164   case Intrinsic::fmuladd: {
6165     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
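         // Emit a single fused multiply-add when the target reports that FMA
         // is faster than a separate multiply and add, and strict FP-op fusion
         // has not been requested; otherwise fall back to an unfused FMUL
         // followed by an FADD.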
6166     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6167         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6168       setValue(&I, DAG.getNode(ISD::FMA, sdl,
6169                                getValue(I.getArgOperand(0)).getValueType(),
6170                                getValue(I.getArgOperand(0)),
6171                                getValue(I.getArgOperand(1)),
6172                                getValue(I.getArgOperand(2))));
6173     } else {
6174       // TODO: Intrinsic calls should have fast-math-flags.
6175       SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
6176                                 getValue(I.getArgOperand(0)).getValueType(),
6177                                 getValue(I.getArgOperand(0)),
6178                                 getValue(I.getArgOperand(1)));
6179       SDValue Add = DAG.getNode(ISD::FADD, sdl,
6180                                 getValue(I.getArgOperand(0)).getValueType(),
6181                                 Mul,
6182                                 getValue(I.getArgOperand(2)));
6183       setValue(&I, Add);
6184     }
6185     return;
6186   }
6187   case Intrinsic::convert_to_fp16:
6188     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6189                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6190                                          getValue(I.getArgOperand(0)),
6191                                          DAG.getTargetConstant(0, sdl,
6192                                                                MVT::i32))));
6193     return;
6194   case Intrinsic::convert_from_fp16:
6195     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6196                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6197                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
6198                                          getValue(I.getArgOperand(0)))));
6199     return;
6200   case Intrinsic::pcmarker: {
6201     SDValue Tmp = getValue(I.getArgOperand(0));
6202     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
6203     return;
6204   }
6205   case Intrinsic::readcyclecounter: {
6206     SDValue Op = getRoot();
6207     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6208                       DAG.getVTList(MVT::i64, MVT::Other), Op);
6209     setValue(&I, Res);
6210     DAG.setRoot(Res.getValue(1));
6211     return;
6212   }
6213   case Intrinsic::bitreverse:
6214     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6215                              getValue(I.getArgOperand(0)).getValueType(),
6216                              getValue(I.getArgOperand(0))));
6217     return;
6218   case Intrinsic::bswap:
6219     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6220                              getValue(I.getArgOperand(0)).getValueType(),
6221                              getValue(I.getArgOperand(0))));
6222     return;
6223   case Intrinsic::cttz: {
6224     SDValue Arg = getValue(I.getArgOperand(0));
6225     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6226     EVT Ty = Arg.getValueType();
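         // The second argument states whether a zero input yields an undefined
         // result; if it does not, we must use the CTTZ node that is fully
         // defined for zero (likewise for ctlz below).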
6227     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6228                              sdl, Ty, Arg));
6229     return;
6230   }
6231   case Intrinsic::ctlz: {
6232     SDValue Arg = getValue(I.getArgOperand(0));
6233     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6234     EVT Ty = Arg.getValueType();
6235     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6236                              sdl, Ty, Arg));
6237     return;
6238   }
6239   case Intrinsic::ctpop: {
6240     SDValue Arg = getValue(I.getArgOperand(0));
6241     EVT Ty = Arg.getValueType();
6242     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6243     return;
6244   }
6245   case Intrinsic::fshl:
6246   case Intrinsic::fshr: {
6247     bool IsFSHL = Intrinsic == Intrinsic::fshl;
6248     SDValue X = getValue(I.getArgOperand(0));
6249     SDValue Y = getValue(I.getArgOperand(1));
6250     SDValue Z = getValue(I.getArgOperand(2));
6251     EVT VT = X.getValueType();
6252     SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT);
6253     SDValue Zero = DAG.getConstant(0, sdl, VT);
6254     SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC);
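         // Per the funnel-shift semantics, the shift amount is always taken
         // modulo the bit width, hence the UREM above.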
6255 
6256     auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6257     if (TLI.isOperationLegalOrCustom(FunnelOpcode, VT)) {
6258       setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6259       return;
6260     }
6261 
6262     // When X == Y, this is a rotate. If the data type has a power-of-2 size, we
6263     // avoid the select that is necessary in the general case to filter out
6264     // the 0-shift possibility that leads to UB.
6265     if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
6266       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6267       if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
6268         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6269         return;
6270       }
6271 
6272       // Some targets only rotate one way. Try the opposite direction.
6273       RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
6274       if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
6275         // Negate the shift amount because it is safe to ignore the high bits.
6276         SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
6277         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));
6278         return;
6279       }
6280 
6281       // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW))
6282       // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW))
6283       SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
6284       SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC);
6285       SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt);
6286       SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? NShAmt : ShAmt);
6287       setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY));
6288       return;
6289     }
6290 
6291     // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
6292     // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
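         //
         // For example, with i8 operands and Z == 3:
         //   fshl(X, Y, 3) == (X << 3) | (Y >> 5)
         //   fshr(X, Y, 3) == (X << 5) | (Y >> 3)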
6293     SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt);
6294     SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt);
6295     SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt);
6296     SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY);
6297 
6298     // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth,
6299     // and that is undefined. We must compare and select to avoid UB.
6300     EVT CCVT = MVT::i1;
6301     if (VT.isVector())
6302       CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements());
6303 
6304     // For fshl, 0-shift returns the 1st arg (X).
6305     // For fshr, 0-shift returns the 2nd arg (Y).
6306     SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ);
6307     setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or));
6308     return;
6309   }
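       // The saturating arithmetic intrinsics below clamp to the extreme
       // values of the type instead of wrapping on overflow; e.g. on i8,
       // sadd.sat(100, 100) == 127 and usub.sat(5, 10) == 0.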
6310   case Intrinsic::sadd_sat: {
6311     SDValue Op1 = getValue(I.getArgOperand(0));
6312     SDValue Op2 = getValue(I.getArgOperand(1));
6313     setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6314     return;
6315   }
6316   case Intrinsic::uadd_sat: {
6317     SDValue Op1 = getValue(I.getArgOperand(0));
6318     SDValue Op2 = getValue(I.getArgOperand(1));
6319     setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6320     return;
6321   }
6322   case Intrinsic::ssub_sat: {
6323     SDValue Op1 = getValue(I.getArgOperand(0));
6324     SDValue Op2 = getValue(I.getArgOperand(1));
6325     setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6326     return;
6327   }
6328   case Intrinsic::usub_sat: {
6329     SDValue Op1 = getValue(I.getArgOperand(0));
6330     SDValue Op2 = getValue(I.getArgOperand(1));
6331     setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6332     return;
6333   }
6334   case Intrinsic::smul_fix:
6335   case Intrinsic::umul_fix:
6336   case Intrinsic::smul_fix_sat:
6337   case Intrinsic::umul_fix_sat: {
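         // The third operand is the scale: the operands are fixed-point
         // numbers with that many fractional bits, the full-width product is
         // shifted right by the scale, and the _sat variants clamp the result
         // instead of wrapping.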
6338     SDValue Op1 = getValue(I.getArgOperand(0));
6339     SDValue Op2 = getValue(I.getArgOperand(1));
6340     SDValue Op3 = getValue(I.getArgOperand(2));
6341     setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6342                              Op1.getValueType(), Op1, Op2, Op3));
6343     return;
6344   }
6345   case Intrinsic::sdiv_fix:
6346   case Intrinsic::udiv_fix:
6347   case Intrinsic::sdiv_fix_sat:
6348   case Intrinsic::udiv_fix_sat: {
6349     SDValue Op1 = getValue(I.getArgOperand(0));
6350     SDValue Op2 = getValue(I.getArgOperand(1));
6351     SDValue Op3 = getValue(I.getArgOperand(2));
6352     setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6353                               Op1, Op2, Op3, DAG, TLI));
6354     return;
6355   }
6356   case Intrinsic::stacksave: {
6357     SDValue Op = getRoot();
6358     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6359     Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
6360     setValue(&I, Res);
6361     DAG.setRoot(Res.getValue(1));
6362     return;
6363   }
6364   case Intrinsic::stackrestore:
6365     Res = getValue(I.getArgOperand(0));
6366     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6367     return;
6368   case Intrinsic::get_dynamic_area_offset: {
6369     SDValue Op = getRoot();
6370     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6371     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6372     // The result type for @llvm.get.dynamic.area.offset must not be wider
6373     // than the target's pointer type (PtrTy).
6374     if (PtrTy.getSizeInBits() < ResTy.getSizeInBits())
6375       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6376                          " intrinsic!");
6377     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6378                       Op);
6379     DAG.setRoot(Op);
6380     setValue(&I, Res);
6381     return;
6382   }
6383   case Intrinsic::stackguard: {
6384     MachineFunction &MF = DAG.getMachineFunction();
6385     const Module &M = *MF.getFunction().getParent();
6386     SDValue Chain = getRoot();
6387     if (TLI.useLoadStackGuardNode()) {
6388       Res = getLoadStackGuard(DAG, sdl, Chain);
6389     } else {
6390       EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6391       const Value *Global = TLI.getSDagStackGuard(M);
6392       unsigned Align = DL->getPrefTypeAlignment(Global->getType());
6393       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6394                         MachinePointerInfo(Global, 0), Align,
6395                         MachineMemOperand::MOVolatile);
6396     }
6397     if (TLI.useStackGuardXorFP())
6398       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6399     DAG.setRoot(Chain);
6400     setValue(&I, Res);
6401     return;
6402   }
6403   case Intrinsic::stackprotector: {
6404     // Emit code into the DAG to store the stack guard onto the stack.
6405     MachineFunction &MF = DAG.getMachineFunction();
6406     MachineFrameInfo &MFI = MF.getFrameInfo();
6407     SDValue Src, Chain = getRoot();
6408 
6409     if (TLI.useLoadStackGuardNode())
6410       Src = getLoadStackGuard(DAG, sdl, Chain);
6411     else
6412       Src = getValue(I.getArgOperand(0));   // The guard's value.
6413 
6414     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
6415 
6416     int FI = FuncInfo.StaticAllocaMap[Slot];
6417     MFI.setStackProtectorIndex(FI);
6418     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6419 
6420     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
6421 
6422     // Store the stack protector onto the stack.
6423     Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
6424                                                  DAG.getMachineFunction(), FI),
6425                        /* Alignment = */ 0, MachineMemOperand::MOVolatile);
6426     setValue(&I, Res);
6427     DAG.setRoot(Res);
6428     return;
6429   }
6430   case Intrinsic::objectsize:
6431     llvm_unreachable("llvm.objectsize.* should have been lowered already");
6432 
6433   case Intrinsic::is_constant:
6434     llvm_unreachable("llvm.is.constant.* should have been lowered already");
6435 
6436   case Intrinsic::annotation:
6437   case Intrinsic::ptr_annotation:
6438   case Intrinsic::launder_invariant_group:
6439   case Intrinsic::strip_invariant_group:
6440     // Drop the intrinsic, but forward the value
6441     setValue(&I, getValue(I.getOperand(0)));
6442     return;
6443   case Intrinsic::assume:
6444   case Intrinsic::var_annotation:
6445   case Intrinsic::sideeffect:
6446     // Discard annotate attributes, assumptions, and artificial side-effects.
6447     return;
6448 
6449   case Intrinsic::codeview_annotation: {
6450     // Emit a label associated with this metadata.
6451     MachineFunction &MF = DAG.getMachineFunction();
6452     MCSymbol *Label =
6453         MF.getMMI().getContext().createTempSymbol("annotation", true);
6454     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
6455     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
6456     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
6457     DAG.setRoot(Res);
6458     return;
6459   }
6460 
6461   case Intrinsic::init_trampoline: {
6462     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
6463 
6464     SDValue Ops[6];
6465     Ops[0] = getRoot();
6466     Ops[1] = getValue(I.getArgOperand(0));
6467     Ops[2] = getValue(I.getArgOperand(1));
6468     Ops[3] = getValue(I.getArgOperand(2));
6469     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
6470     Ops[5] = DAG.getSrcValue(F);
6471 
6472     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
6473 
6474     DAG.setRoot(Res);
6475     return;
6476   }
6477   case Intrinsic::adjust_trampoline:
6478     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
6479                              TLI.getPointerTy(DAG.getDataLayout()),
6480                              getValue(I.getArgOperand(0))));
6481     return;
6482   case Intrinsic::gcroot: {
6483     assert(DAG.getMachineFunction().getFunction().hasGC() &&
6484            "only valid in functions with gc specified, enforced by Verifier");
6485     assert(GFI && "implied by previous");
6486     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
6487     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
6488 
6489     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
6490     GFI->addStackRoot(FI->getIndex(), TypeMap);
6491     return;
6492   }
6493   case Intrinsic::gcread:
6494   case Intrinsic::gcwrite:
6495     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
6496   case Intrinsic::flt_rounds:
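         // The result uses the C FLT_ROUNDS encoding: 0 = toward zero,
         // 1 = to nearest, 2 = toward +infinity, 3 = toward -infinity.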
6497     Res = DAG.getNode(ISD::FLT_ROUNDS_, sdl, {MVT::i32, MVT::Other}, getRoot());
6498     setValue(&I, Res);
6499     DAG.setRoot(Res.getValue(1));
6500     return;
6501 
6502   case Intrinsic::expect:
6503     // Just replace __builtin_expect(exp, c) with exp.
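         // The probability hint has normally already been converted into
         // branch_weights metadata by the LowerExpectIntrinsic pass.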
6504     setValue(&I, getValue(I.getArgOperand(0)));
6505     return;
6506 
6507   case Intrinsic::debugtrap:
6508   case Intrinsic::trap: {
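         // If the call site carries a "trap-func-name" attribute (set, for
         // example, by clang's -ftrap-function=), emit a call to that function
         // instead of a trap instruction.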
6509     StringRef TrapFuncName =
6510         I.getAttributes()
6511             .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
6512             .getValueAsString();
6513     if (TrapFuncName.empty()) {
6514       ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
6515         ISD::TRAP : ISD::DEBUGTRAP;
6516       DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
6517       return;
6518     }
6519     TargetLowering::ArgListTy Args;
6520 
6521     TargetLowering::CallLoweringInfo CLI(DAG);
6522     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
6523         CallingConv::C, I.getType(),
6524         DAG.getExternalSymbol(TrapFuncName.data(),
6525                               TLI.getPointerTy(DAG.getDataLayout())),
6526         std::move(Args));
6527 
6528     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
6529     DAG.setRoot(Result.second);
6530     return;
6531   }
6532 
6533   case Intrinsic::uadd_with_overflow:
6534   case Intrinsic::sadd_with_overflow:
6535   case Intrinsic::usub_with_overflow:
6536   case Intrinsic::ssub_with_overflow:
6537   case Intrinsic::umul_with_overflow:
6538   case Intrinsic::smul_with_overflow: {
6539     ISD::NodeType Op;
6540     switch (Intrinsic) {
6541     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6542     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
6543     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
6544     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
6545     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
6546     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
6547     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
6548     }
6549     SDValue Op1 = getValue(I.getArgOperand(0));
6550     SDValue Op2 = getValue(I.getArgOperand(1));
6551 
6552     EVT ResultVT = Op1.getValueType();
6553     EVT OverflowVT = MVT::i1;
6554     if (ResultVT.isVector())
6555       OverflowVT = EVT::getVectorVT(
6556           *Context, OverflowVT, ResultVT.getVectorNumElements());
6557 
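         // Each of these nodes produces two results: the (wrapping) arithmetic
         // value and an i1 overflow flag (a vector of i1 for vector operands);
         // e.g. uaddo on i8: 255 + 1 -> {0, overflow = true}.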
6558     SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
6559     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
6560     return;
6561   }
6562   case Intrinsic::prefetch: {
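         // llvm.prefetch(ptr, rw, locality, cachetype): rw is 0 for read, 1
         // for write; locality ranges from 0 (no locality) to 3 (extremely
         // local); cache type is 0 for instruction, 1 for data.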
6563     SDValue Ops[5];
6564     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
6565     auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
6566     Ops[0] = DAG.getRoot();
6567     Ops[1] = getValue(I.getArgOperand(0));
6568     Ops[2] = getValue(I.getArgOperand(1));
6569     Ops[3] = getValue(I.getArgOperand(2));
6570     Ops[4] = getValue(I.getArgOperand(3));
6571     SDValue Result = DAG.getMemIntrinsicNode(
6572         ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
6573         EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
6574         /* align */ None, Flags);
6575 
6576     // Chain the prefetch in parallel with any pending loads, to stay out of
6577     // the way of later optimizations.
6578     PendingLoads.push_back(Result);
6579     Result = getRoot();
6580     DAG.setRoot(Result);
6581     return;
6582   }
6583   case Intrinsic::lifetime_start:
6584   case Intrinsic::lifetime_end: {
6585     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
6586     // Stack coloring is not enabled in O0, discard region information.
6587     if (TM.getOptLevel() == CodeGenOpt::None)
6588       return;
6589 
6590     const int64_t ObjectSize =
6591         cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
6592     Value *const ObjectPtr = I.getArgOperand(1);
6593     SmallVector<const Value *, 4> Allocas;
6594     GetUnderlyingObjects(ObjectPtr, Allocas, *DL);
6595 
6596     for (const Value *Object : Allocas) {
6597       const AllocaInst *LifetimeObject =
6598           dyn_cast_or_null<AllocaInst>(Object);
6599 
6600       // Could not find an Alloca.
6601       if (!LifetimeObject)
6602         continue;
6603 
6604       // First check that the Alloca is static, otherwise it won't have a
6605       // valid frame index.
6606       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
6607       if (SI == FuncInfo.StaticAllocaMap.end())
6608         return;
6609 
6610       const int FrameIndex = SI->second;
6611       int64_t Offset;
6612       if (GetPointerBaseWithConstantOffset(
6613               ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
6614         Offset = -1; // Cannot determine offset from alloca to lifetime object.
6615       Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
6616                                 Offset);
6617       DAG.setRoot(Res);
6618     }
6619     return;
6620   }
6621   case Intrinsic::invariant_start:
6622     // Discard region information.
6623     setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
6624     return;
6625   case Intrinsic::invariant_end:
6626     // Discard region information.
6627     return;
6628   case Intrinsic::clear_cache:
6629     // FunctionName may be null.
6630     if (const char *FunctionName = TLI.getClearCacheBuiltinName())
6631       lowerCallToExternalSymbol(I, FunctionName);
6632     return;
6633   case Intrinsic::donothing:
6634     // ignore
6635     return;
6636   case Intrinsic::experimental_stackmap:
6637     visitStackmap(I);
6638     return;
6639   case Intrinsic::experimental_patchpoint_void:
6640   case Intrinsic::experimental_patchpoint_i64:
6641     visitPatchpoint(I);
6642     return;
6643   case Intrinsic::experimental_gc_statepoint:
6644     LowerStatepoint(cast<GCStatepointInst>(I));
6645     return;
6646   case Intrinsic::experimental_gc_result:
6647     visitGCResult(cast<GCResultInst>(I));
6648     return;
6649   case Intrinsic::experimental_gc_relocate:
6650     visitGCRelocate(cast<GCRelocateInst>(I));
6651     return;
6652   case Intrinsic::instrprof_increment:
6653     llvm_unreachable("instrprof failed to lower an increment");
6654   case Intrinsic::instrprof_value_profile:
6655     llvm_unreachable("instrprof failed to lower a value profiling call");
6656   case Intrinsic::localescape: {
6657     MachineFunction &MF = DAG.getMachineFunction();
6658     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6659 
6660     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
6661     // is the same on all targets.
6662     for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
6663       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
6664       if (isa<ConstantPointerNull>(Arg))
6665         continue; // Skip null pointers. They represent a hole in index space.
6666       AllocaInst *Slot = cast<AllocaInst>(Arg);
6667       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
6668              "can only escape static allocas");
6669       int FI = FuncInfo.StaticAllocaMap[Slot];
6670       MCSymbol *FrameAllocSym =
6671           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6672               GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
6673       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
6674               TII->get(TargetOpcode::LOCAL_ESCAPE))
6675           .addSym(FrameAllocSym)
6676           .addFrameIndex(FI);
6677     }
6678 
6679     return;
6680   }
6681 
6682   case Intrinsic::localrecover: {
6683     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
6684     MachineFunction &MF = DAG.getMachineFunction();
6685 
6686     // Get the symbol that defines the frame offset.
6687     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
6688     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
6689     unsigned IdxVal =
6690         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
6691     MCSymbol *FrameAllocSym =
6692         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6693             GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
6694 
6695     Value *FP = I.getArgOperand(1);
6696     SDValue FPVal = getValue(FP);
6697     EVT PtrVT = FPVal.getValueType();
6698 
6699     // Create a MCSymbol for the label to avoid any target lowering
6700     // that would make this PC relative.
6701     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
6702     SDValue OffsetVal =
6703         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
6704 
6705     // Add the offset to the FP.
6706     SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
6707     setValue(&I, Add);
6708 
6709     return;
6710   }
6711 
6712   case Intrinsic::eh_exceptionpointer:
6713   case Intrinsic::eh_exceptioncode: {
6714     // Get the exception pointer vreg, copy from it, and resize it to fit.
6715     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
6716     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
6717     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
6718     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
6719     SDValue N =
6720         DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
6721     if (Intrinsic == Intrinsic::eh_exceptioncode)
6722       N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
6723     setValue(&I, N);
6724     return;
6725   }
6726   case Intrinsic::xray_customevent: {
6727     // Here we want to make sure that the intrinsic behaves as if it has a
6728     // specific calling convention; for now this applies only to x86_64 Linux.
6729     // FIXME: Support other platforms later.
6730     const auto &Triple = DAG.getTarget().getTargetTriple();
6731     if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6732       return;
6733 
6734     SDLoc DL = getCurSDLoc();
6735     SmallVector<SDValue, 8> Ops;
6736 
6737     // We want to say that we always want the arguments in registers.
6738     SDValue LogEntryVal = getValue(I.getArgOperand(0));
6739     SDValue StrSizeVal = getValue(I.getArgOperand(1));
6740     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6741     SDValue Chain = getRoot();
6742     Ops.push_back(LogEntryVal);
6743     Ops.push_back(StrSizeVal);
6744     Ops.push_back(Chain);
6745 
6746     // We need to enforce the calling convention for the call site, so that
6747     // argument ordering is enforced correctly, and so that register
6748     // allocation can see that some registers may be assumed clobbered and
6749     // must be preserved across calls to the intrinsic.
6750     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
6751                                            DL, NodeTys, Ops);
6752     SDValue patchableNode = SDValue(MN, 0);
6753     DAG.setRoot(patchableNode);
6754     setValue(&I, patchableNode);
6755     return;
6756   }
6757   case Intrinsic::xray_typedevent: {
6758     // Here we want to make sure that the intrinsic behaves as if it has a
6759     // specific calling convention; for now this applies only to x86_64 Linux.
6760     // FIXME: Support other platforms later.
6761     const auto &Triple = DAG.getTarget().getTargetTriple();
6762     if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6763       return;
6764 
6765     SDLoc DL = getCurSDLoc();
6766     SmallVector<SDValue, 8> Ops;
6767 
6768     // We want to say that we always want the arguments in registers.
6769     // It's unclear to me how manipulating the selection DAG here forces callers
6770     // to provide arguments in registers instead of on the stack.
6771     SDValue LogTypeId = getValue(I.getArgOperand(0));
6772     SDValue LogEntryVal = getValue(I.getArgOperand(1));
6773     SDValue StrSizeVal = getValue(I.getArgOperand(2));
6774     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6775     SDValue Chain = getRoot();
6776     Ops.push_back(LogTypeId);
6777     Ops.push_back(LogEntryVal);
6778     Ops.push_back(StrSizeVal);
6779     Ops.push_back(Chain);
6780 
6781     // We need to enforce the calling convention for the call site, so that
6782     // argument ordering is enforced correctly, and so that register
6783     // allocation can see that some registers may be assumed clobbered and
6784     // must be preserved across calls to the intrinsic.
6785     MachineSDNode *MN = DAG.getMachineNode(
6786         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
6787     SDValue patchableNode = SDValue(MN, 0);
6788     DAG.setRoot(patchableNode);
6789     setValue(&I, patchableNode);
6790     return;
6791   }
6792   case Intrinsic::experimental_deoptimize:
6793     LowerDeoptimizeCall(&I);
6794     return;
6795 
6796   case Intrinsic::experimental_vector_reduce_v2_fadd:
6797   case Intrinsic::experimental_vector_reduce_v2_fmul:
6798   case Intrinsic::experimental_vector_reduce_add:
6799   case Intrinsic::experimental_vector_reduce_mul:
6800   case Intrinsic::experimental_vector_reduce_and:
6801   case Intrinsic::experimental_vector_reduce_or:
6802   case Intrinsic::experimental_vector_reduce_xor:
6803   case Intrinsic::experimental_vector_reduce_smax:
6804   case Intrinsic::experimental_vector_reduce_smin:
6805   case Intrinsic::experimental_vector_reduce_umax:
6806   case Intrinsic::experimental_vector_reduce_umin:
6807   case Intrinsic::experimental_vector_reduce_fmax:
6808   case Intrinsic::experimental_vector_reduce_fmin:
6809     visitVectorReduce(I, Intrinsic);
6810     return;
6811 
6812   case Intrinsic::icall_branch_funnel: {
6813     SmallVector<SDValue, 16> Ops;
6814     Ops.push_back(getValue(I.getArgOperand(0)));
6815 
6816     int64_t Offset;
6817     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6818         I.getArgOperand(1), Offset, DAG.getDataLayout()));
6819     if (!Base)
6820       report_fatal_error(
6821           "llvm.icall.branch.funnel operand must be a GlobalValue");
6822     Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0));
6823 
6824     struct BranchFunnelTarget {
6825       int64_t Offset;
6826       SDValue Target;
6827     };
6828     SmallVector<BranchFunnelTarget, 8> Targets;
6829 
6830     for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) {
6831       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6832           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
6833       if (ElemBase != Base)
6834         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
6835                            "to the same GlobalValue");
6836 
6837       SDValue Val = getValue(I.getArgOperand(Op + 1));
6838       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
6839       if (!GA)
6840         report_fatal_error(
6841             "llvm.icall.branch.funnel operand must be a GlobalValue");
6842       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
6843                                      GA->getGlobal(), getCurSDLoc(),
6844                                      Val.getValueType(), GA->getOffset())});
6845     }
6846     llvm::sort(Targets,
6847                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
6848                  return T1.Offset < T2.Offset;
6849                });
6850 
6851     for (auto &T : Targets) {
6852       Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
6853       Ops.push_back(T.Target);
6854     }
6855 
6856     Ops.push_back(DAG.getRoot()); // Chain
6857     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL,
6858                                  getCurSDLoc(), MVT::Other, Ops),
6859               0);
6860     DAG.setRoot(N);
6861     setValue(&I, N);
6862     HasTailCall = true;
6863     return;
6864   }
6865 
6866   case Intrinsic::wasm_landingpad_index:
6867     // The information this intrinsic contained has been transferred to
6868     // MachineFunction in SelectionDAGISel::PrepareEHLandingPad, so we can
6869     // safely discard the intrinsic now.
6870     return;
6871 
6872   case Intrinsic::aarch64_settag:
6873   case Intrinsic::aarch64_settag_zero: {
6874     const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6875     bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
6876     SDValue Val = TSI.EmitTargetCodeForSetTag(
6877         DAG, getCurSDLoc(), getRoot(), getValue(I.getArgOperand(0)),
6878         getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
6879         ZeroMemory);
6880     DAG.setRoot(Val);
6881     setValue(&I, Val);
6882     return;
6883   }
6884   case Intrinsic::ptrmask: {
6885     SDValue Ptr = getValue(I.getOperand(0));
6886     SDValue Const = getValue(I.getOperand(1));
6887 
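         // llvm.ptrmask ands the pointer bits with an integer mask while
         // keeping pointer provenance; e.g. ptrmask(p, -64) clears the low six
         // bits, aligning p down to a 64-byte boundary.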
6888     EVT PtrVT = Ptr.getValueType();
6889     setValue(&I, DAG.getNode(ISD::AND, getCurSDLoc(), PtrVT, Ptr,
6890                              DAG.getZExtOrTrunc(Const, getCurSDLoc(), PtrVT)));
6891     return;
6892   }
6893   }
6894 }
6895 
6896 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
6897     const ConstrainedFPIntrinsic &FPI) {
6898   SDLoc sdl = getCurSDLoc();
6899 
6900   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6901   SmallVector<EVT, 4> ValueVTs;
6902   ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs);
6903   ValueVTs.push_back(MVT::Other); // Out chain
6904 
6905   // We do not need to serialize constrained FP intrinsics against
6906   // each other or against (nonvolatile) loads, so they can be
6907   // chained like loads.
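       // Each constrained intrinsic carries its rounding mode and exception
       // behavior as metadata operands, e.g.:
       //   call double @llvm.experimental.constrained.fadd.f64(
       //       double %a, double %b,
       //       metadata !"round.dynamic", metadata !"fpexcept.strict")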
6908   SDValue Chain = DAG.getRoot();
6909   SmallVector<SDValue, 4> Opers;
6910   Opers.push_back(Chain);
6911   if (FPI.isUnaryOp()) {
6912     Opers.push_back(getValue(FPI.getArgOperand(0)));
6913   } else if (FPI.isTernaryOp()) {
6914     Opers.push_back(getValue(FPI.getArgOperand(0)));
6915     Opers.push_back(getValue(FPI.getArgOperand(1)));
6916     Opers.push_back(getValue(FPI.getArgOperand(2)));
6917   } else {
6918     Opers.push_back(getValue(FPI.getArgOperand(0)));
6919     Opers.push_back(getValue(FPI.getArgOperand(1)));
6920   }
6921 
6922   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
6923     assert(Result.getNode()->getNumValues() == 2);
6924 
6925     // Push node to the appropriate list so that future instructions can be
6926     // chained up correctly.
6927     SDValue OutChain = Result.getValue(1);
6928     switch (EB) {
6929     case fp::ExceptionBehavior::ebIgnore:
6930       // The only reason why ebIgnore nodes still need to be chained is that
6931       // they might depend on the current rounding mode, and therefore must
6932       // not be moved across instructions that may change that mode.
6933       LLVM_FALLTHROUGH;
6934     case fp::ExceptionBehavior::ebMayTrap:
6935       // These must not be moved across calls or instructions that may change
6936       // floating-point exception masks.
6937       PendingConstrainedFP.push_back(OutChain);
6938       break;
6939     case fp::ExceptionBehavior::ebStrict:
6940       // These must not be moved across calls or instructions that may change
6941       // floating-point exception masks or read floating-point exception flags.
6942       // In addition, they cannot be optimized out even if unused.
6943       PendingConstrainedFPStrict.push_back(OutChain);
6944       break;
6945     }
6946   };
6947 
6948   SDVTList VTs = DAG.getVTList(ValueVTs);
6949   fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
6950 
6951   SDNodeFlags Flags;
6952   if (EB == fp::ExceptionBehavior::ebIgnore)
6953     Flags.setNoFPExcept(true);
6954 
6955   if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
6956     Flags.copyFMF(*FPOp);
6957 
6958   unsigned Opcode;
6959   switch (FPI.getIntrinsicID()) {
6960   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6961 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
6962   case Intrinsic::INTRINSIC:                                                   \
6963     Opcode = ISD::STRICT_##DAGN;                                               \
6964     break;
6965 #include "llvm/IR/ConstrainedOps.def"
6966   case Intrinsic::experimental_constrained_fmuladd: {
6967     Opcode = ISD::STRICT_FMA;
6968     // Break fmuladd into fmul and fadd.
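         // That is, fmuladd(a, b, c) is emitted as fadd(fmul(a, b), c),
         // threading the strict chain from the fmul into the fadd.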
6969     if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
6970         !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(),
6971                                         ValueVTs[0])) {
6972       Opers.pop_back();
6973       SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
6974       pushOutChain(Mul, EB);
6975       Opcode = ISD::STRICT_FADD;
6976       Opers.clear();
6977       Opers.push_back(Mul.getValue(1));
6978       Opers.push_back(Mul.getValue(0));
6979       Opers.push_back(getValue(FPI.getArgOperand(2)));
6980     }
6981     break;
6982   }
6983   }
6984 
6985   // A few strict DAG nodes carry additional operands that are not
6986   // set up by the default code above.
6987   switch (Opcode) {
6988   default: break;
6989   case ISD::STRICT_FP_ROUND:
6990     Opers.push_back(
6991         DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
6992     break;
6993   case ISD::STRICT_FSETCC:
6994   case ISD::STRICT_FSETCCS: {
6995     auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
6996     Opers.push_back(DAG.getCondCode(getFCmpCondCode(FPCmp->getPredicate())));
6997     break;
6998   }
6999   }
7000 
7001   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
7002   pushOutChain(Result, EB);
7003 
7004   SDValue FPResult = Result.getValue(0);
7005   setValue(&FPI, FPResult);
7006 }
7007 
7008 std::pair<SDValue, SDValue>
7009 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
7010                                     const BasicBlock *EHPadBB) {
7011   MachineFunction &MF = DAG.getMachineFunction();
7012   MachineModuleInfo &MMI = MF.getMMI();
7013   MCSymbol *BeginLabel = nullptr;
7014 
7015   if (EHPadBB) {
7016     // Insert a label before the invoke call to mark the try range.  This can be
7017     // used to detect deletion of the invoke via the MachineModuleInfo.
7018     BeginLabel = MMI.getContext().createTempSymbol();
7019 
7020     // For SjLj, keep track of which landing pads go with which invokes
7021     // so as to maintain the ordering of pads in the LSDA.
7022     unsigned CallSiteIndex = MMI.getCurrentCallSite();
7023     if (CallSiteIndex) {
7024       MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
7025       LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
7026 
7027       // Now that the call site is handled, stop tracking it.
7028       MMI.setCurrentCallSite(0);
7029     }
7030 
7031     // Both PendingLoads and PendingExports must be flushed here;
7032     // this call might not return.
7033     (void)getRoot();
7034     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
7035 
7036     CLI.setChain(getRoot());
7037   }
7038   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7039   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7040 
7041   assert((CLI.IsTailCall || Result.second.getNode()) &&
7042          "Non-null chain expected with non-tail call!");
7043   assert((Result.second.getNode() || !Result.first.getNode()) &&
7044          "Null value expected with tail call!");
7045 
7046   if (!Result.second.getNode()) {
7047     // As a special case, a null chain means that a tail call has been emitted
7048     // and the DAG root is already updated.
7049     HasTailCall = true;
7050 
7051     // Since there's no actual continuation from this block, nothing can be
7052     // relying on us setting vregs for them.
7053     PendingExports.clear();
7054   } else {
7055     DAG.setRoot(Result.second);
7056   }
7057 
7058   if (EHPadBB) {
7059     // Insert a label at the end of the invoke call to mark the try range.  This
7060     // can be used to detect deletion of the invoke via the MachineModuleInfo.
7061     MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
7062     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
7063 
7064     // Inform MachineModuleInfo of range.
7065     auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
7066     // Some platforms (e.g. wasm) use funclet-style IR but do not actually
7067     // use outlined funclets and their LSDA info style.
7068     if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
7069       assert(CLI.CB);
7070       WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
7071       EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CB), BeginLabel, EndLabel);
7072     } else if (!isScopedEHPersonality(Pers)) {
7073       MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
7074     }
7075   }
7076 
7077   return Result;
7078 }
7079 
7080 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
7081                                       bool isTailCall,
7082                                       const BasicBlock *EHPadBB) {
7083   auto &DL = DAG.getDataLayout();
7084   FunctionType *FTy = CB.getFunctionType();
7085   Type *RetTy = CB.getType();
7086 
7087   TargetLowering::ArgListTy Args;
7088   Args.reserve(CB.arg_size());
7089 
7090   const Value *SwiftErrorVal = nullptr;
7091   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7092 
7093   if (isTailCall) {
7094     // Avoid emitting tail calls in functions with the disable-tail-calls
7095     // attribute.
7096     auto *Caller = CB.getParent()->getParent();
7097     if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
7098         "true")
7099       isTailCall = false;
7100 
7101     // We can't tail call inside a function with a swifterror argument. Lowering
7102     // does not support this yet; the value would have to be moved into the
7103     // swifterror register before the call.
7104     if (TLI.supportSwiftError() &&
7105         Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
7106       isTailCall = false;
7107   }
7108 
7109   for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
7110     TargetLowering::ArgListEntry Entry;
7111     const Value *V = *I;
7112 
7113     // Skip empty types
7114     if (V->getType()->isEmptyTy())
7115       continue;
7116 
7117     SDValue ArgNode = getValue(V);
7118     Entry.Node = ArgNode; Entry.Ty = V->getType();
7119 
7120     Entry.setAttributes(&CB, I - CB.arg_begin());
7121 
7122     // Use swifterror virtual register as input to the call.
7123     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
7124       SwiftErrorVal = V;
7125       // We find the virtual register for the actual swifterror argument.
7126       // Instead of using the Value, we use the virtual register instead.
7127       Entry.Node =
7128           DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
7129                           EVT(TLI.getPointerTy(DL)));
7130     }
7131 
7132     Args.push_back(Entry);
7133 
7134     // If we have an explicit sret argument that is an Instruction (i.e., it
7135     // might point to function-local memory), we can't meaningfully tail-call.
7136     if (Entry.IsSRet && isa<Instruction>(V))
7137       isTailCall = false;
7138   }
7139 
7140   // If call site has a cfguardtarget operand bundle, create and add an
7141   // additional ArgListEntry.
7142   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
7143     TargetLowering::ArgListEntry Entry;
7144     Value *V = Bundle->Inputs[0];
7145     SDValue ArgNode = getValue(V);
7146     Entry.Node = ArgNode;
7147     Entry.Ty = V->getType();
7148     Entry.IsCFGuardTarget = true;
7149     Args.push_back(Entry);
7150   }
7151 
7152   // Check if target-independent constraints permit a tail call here.
7153   // Target-dependent constraints are checked within TLI->LowerCallTo.
7154   if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
7155     isTailCall = false;
7156 
7157   // Disable tail calls if there is a swifterror argument. Targets have not
7158   // been updated to support tail calls.
7159   if (TLI.supportSwiftError() && SwiftErrorVal)
7160     isTailCall = false;
7161 
7162   TargetLowering::CallLoweringInfo CLI(DAG);
7163   CLI.setDebugLoc(getCurSDLoc())
7164       .setChain(getRoot())
7165       .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
7166       .setTailCall(isTailCall)
7167       .setConvergent(CB.isConvergent())
7168       .setIsPreallocated(
7169           CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
7170   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
7171 
7172   if (Result.first.getNode()) {
7173     Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
7174     setValue(&CB, Result.first);
7175   }
7176 
7177   // The last element of CLI.InVals has the SDValue for swifterror return.
7178   // Here we copy it to a virtual register and update SwiftErrorMap for
7179   // book-keeping.
7180   if (SwiftErrorVal && TLI.supportSwiftError()) {
7181     // Get the last element of InVals.
7182     SDValue Src = CLI.InVals.back();
7183     Register VReg =
7184         SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
7185     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
7186     DAG.setRoot(CopyNode);
7187   }
7188 }
7189 
7190 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
7191                              SelectionDAGBuilder &Builder) {
7192   // Check to see if this load can be trivially constant folded, e.g. if the
7193   // input is from a string literal.
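       // For instance, in memcmp(p, "ab", 2) the 2-byte load from the string
       // literal folds to a constant, so no runtime load is emitted for it.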
7194   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
7195     // Cast pointer to the type we really want to load.
7196     Type *LoadTy =
7197         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
7198     if (LoadVT.isVector())
7199       LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
7200 
7201     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
7202                                          PointerType::getUnqual(LoadTy));
7203 
7204     if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
7205             const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
7206       return Builder.getValue(LoadCst);
7207   }
7208 
7209   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
7210   // still constant memory, the input chain can be the entry node.
7211   SDValue Root;
7212   bool ConstantMemory = false;
7213 
7214   // Do not serialize (non-volatile) loads of constant memory with anything.
7215   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
7216     Root = Builder.DAG.getEntryNode();
7217     ConstantMemory = true;
7218   } else {
7219     // Do not serialize non-volatile loads against each other.
7220     Root = Builder.DAG.getRoot();
7221   }
7222 
7223   SDValue Ptr = Builder.getValue(PtrVal);
7224   SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
7225                                         Ptr, MachinePointerInfo(PtrVal),
7226                                         /* Alignment = */ 1);
7227 
7228   if (!ConstantMemory)
7229     Builder.PendingLoads.push_back(LoadVal.getValue(1));
7230   return LoadVal;
7231 }
7232 
7233 /// Record the value for an instruction that produces an integer result,
7234 /// converting the type where necessary.
7235 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
7236                                                   SDValue Value,
7237                                                   bool IsSigned) {
7238   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7239                                                     I.getType(), true);
7240   if (IsSigned)
7241     Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
7242   else
7243     Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
7244   setValue(&I, Value);
7245 }
7246 
7247 /// See if we can lower a memcmp call into an optimized form. If so, return
7248 /// true and lower it. Otherwise return false, and it will be lowered like a
7249 /// normal call.
7250 /// The caller already checked that \p I calls the appropriate LibFunc with a
7251 /// correct prototype.
7252 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
7253   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
7254   const Value *Size = I.getArgOperand(2);
7255   const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
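       // A zero-length memcmp compares no bytes and is defined to return 0, so
       // fold it to a constant of the call's result type.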
7256   if (CSize && CSize->getZExtValue() == 0) {
7257     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7258                                                           I.getType(), true);
7259     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
7260     return true;
7261   }
7262 
7263   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7264   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
7265       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
7266       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
7267   if (Res.first.getNode()) {
7268     processIntegerCallValue(I, Res.first, true);
7269     PendingLoads.push_back(Res.second);
7270     return true;
7271   }
7272 
7273   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
7274   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
7275   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
7276     return false;
7277 
7278   // If the target has a fast compare for the given size, it will return a
7279   // preferred load type for that size. Require that the load VT is legal and
7280   // that the target supports unaligned loads of that type. Otherwise, return
7281   // INVALID.
7282   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
7283     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7284     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
7285     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
7286       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
7287       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
7288       // TODO: Check alignment of src and dest ptrs.
7289       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
7290       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
7291       if (!TLI.isTypeLegal(LVT) ||
7292           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
7293           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
7294         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
7295     }
7296 
7297     return LVT;
7298   };
7299 
7300   // This turns into unaligned loads. We only do this if the target natively
7301   // supports the MVT we'll be loading or if it is small enough (<= 4) that
7302   // we'll only produce a small number of byte loads.
7303   MVT LoadVT;
7304   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
7305   switch (NumBitsToCompare) {
7306   default:
7307     return false;
7308   case 16:
7309     LoadVT = MVT::i16;
7310     break;
7311   case 32:
7312     LoadVT = MVT::i32;
7313     break;
7314   case 64:
7315   case 128:
7316   case 256:
7317     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
7318     break;
7319   }
7320 
7321   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
7322     return false;
7323 
7324   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
7325   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
7326 
7327   // Bitcast to a wide integer type if the loads are vectors.
7328   if (LoadVT.isVector()) {
7329     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
7330     LoadL = DAG.getBitcast(CmpVT, LoadL);
7331     LoadR = DAG.getBitcast(CmpVT, LoadR);
7332   }
7333 
7334   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
7335   processIntegerCallValue(I, Cmp, false);
7336   return true;
7337 }
7338 
7339 /// See if we can lower a memchr call into an optimized form. If so, return
7340 /// true and lower it. Otherwise return false, and it will be lowered like a
7341 /// normal call.
7342 /// The caller already checked that \p I calls the appropriate LibFunc with a
7343 /// correct prototype.
7344 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
7345   const Value *Src = I.getArgOperand(0);
7346   const Value *Char = I.getArgOperand(1);
7347   const Value *Length = I.getArgOperand(2);
7348 
7349   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7350   std::pair<SDValue, SDValue> Res =
7351     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
7352                                 getValue(Src), getValue(Char), getValue(Length),
7353                                 MachinePointerInfo(Src));
7354   if (Res.first.getNode()) {
7355     setValue(&I, Res.first);
7356     PendingLoads.push_back(Res.second);
7357     return true;
7358   }
7359 
7360   return false;
7361 }
7362 
7363 /// See if we can lower a mempcpy call into an optimized form. If so, return
7364 /// true and lower it. Otherwise return false, and it will be lowered like a
7365 /// normal call.
7366 /// The caller already checked that \p I calls the appropriate LibFunc with a
7367 /// correct prototype.
7368 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
7369   SDValue Dst = getValue(I.getArgOperand(0));
7370   SDValue Src = getValue(I.getArgOperand(1));
7371   SDValue Size = getValue(I.getArgOperand(2));
7372 
7373   Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
7374   Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
7375   // DAG::getMemcpy needs Alignment to be defined.
7376   Align Alignment = std::min(DstAlign, SrcAlign);
7377 
7378   bool isVol = false;
7379   SDLoc sdl = getCurSDLoc();
7380 
7381   // In the mempcpy context we need to pass in a false value for isTailCall
7382   // because the return pointer needs to be adjusted by the size of
7383   // the copied memory.
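       // That is, mempcpy(dst, src, n) returns dst + n, whereas memcpy returns
       // dst.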
7384   SDValue Root = isVol ? getRoot() : getMemoryRoot();
7385   SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, isVol, false,
7386                              /*isTailCall=*/false,
7387                              MachinePointerInfo(I.getArgOperand(0)),
7388                              MachinePointerInfo(I.getArgOperand(1)));
7389   assert(MC.getNode() != nullptr &&
7390          "** memcpy should not be lowered as TailCall in mempcpy context **");
7391   DAG.setRoot(MC);
7392 
7393   // Check if Size needs to be truncated or extended.
7394   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
7395 
7396   // Adjust return pointer to point just past the last dst byte.
7397   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
7398                                     Dst, Size);
7399   setValue(&I, DstPlusSize);
7400   return true;
7401 }
7402 
7403 /// See if we can lower a strcpy call into an optimized form.  If so, return
7404 /// true and lower it, otherwise return false and it will be lowered like a
7405 /// normal call.
7406 /// The caller already checked that \p I calls the appropriate LibFunc with a
7407 /// correct prototype.
7408 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
7409   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7410 
7411   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7412   std::pair<SDValue, SDValue> Res =
7413     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
7414                                 getValue(Arg0), getValue(Arg1),
7415                                 MachinePointerInfo(Arg0),
7416                                 MachinePointerInfo(Arg1), isStpcpy);
7417   if (Res.first.getNode()) {
7418     setValue(&I, Res.first);
7419     DAG.setRoot(Res.second);
7420     return true;
7421   }
7422 
7423   return false;
7424 }
7425 
7426 /// See if we can lower a strcmp call into an optimized form.  If so, return
7427 /// true and lower it, otherwise return false and it will be lowered like a
7428 /// normal call.
7429 /// The caller already checked that \p I calls the appropriate LibFunc with a
7430 /// correct prototype.
7431 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
7432   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7433 
7434   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7435   std::pair<SDValue, SDValue> Res =
7436     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
7437                                 getValue(Arg0), getValue(Arg1),
7438                                 MachinePointerInfo(Arg0),
7439                                 MachinePointerInfo(Arg1));
7440   if (Res.first.getNode()) {
7441     processIntegerCallValue(I, Res.first, true);
7442     PendingLoads.push_back(Res.second);
7443     return true;
7444   }
7445 
7446   return false;
7447 }
7448 
7449 /// See if we can lower a strlen call into an optimized form.  If so, return
7450 /// true and lower it, otherwise return false and it will be lowered like a
7451 /// normal call.
7452 /// The caller already checked that \p I calls the appropriate LibFunc with a
7453 /// correct prototype.
7454 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
7455   const Value *Arg0 = I.getArgOperand(0);
7456 
7457   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7458   std::pair<SDValue, SDValue> Res =
7459     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
7460                                 getValue(Arg0), MachinePointerInfo(Arg0));
7461   if (Res.first.getNode()) {
7462     processIntegerCallValue(I, Res.first, false);
7463     PendingLoads.push_back(Res.second);
7464     return true;
7465   }
7466 
7467   return false;
7468 }
7469 
7470 /// See if we can lower a strnlen call into an optimized form.  If so, return
7471 /// true and lower it, otherwise return false and it will be lowered like a
7472 /// normal call.
7473 /// The caller already checked that \p I calls the appropriate LibFunc with a
7474 /// correct prototype.
7475 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
7476   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7477 
7478   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7479   std::pair<SDValue, SDValue> Res =
7480     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
7481                                  getValue(Arg0), getValue(Arg1),
7482                                  MachinePointerInfo(Arg0));
7483   if (Res.first.getNode()) {
7484     processIntegerCallValue(I, Res.first, false);
7485     PendingLoads.push_back(Res.second);
7486     return true;
7487   }
7488 
7489   return false;
7490 }
7491 
7492 /// See if we can lower a unary floating-point operation into an SDNode with
7493 /// the specified Opcode.  If so, return true and lower it, otherwise return
7494 /// false and it will be lowered like a normal call.
7495 /// The caller already checked that \p I calls the appropriate LibFunc with a
7496 /// correct prototype.
7497 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
7498                                               unsigned Opcode) {
7499   // We already checked this call's prototype; verify it doesn't modify errno.
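       // For example, sqrt(-1.0) may set errno to EDOM; such a call is not a
       // pure FP operation and must stay a libcall.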
7500   if (!I.onlyReadsMemory())
7501     return false;
7502 
7503   SDValue Tmp = getValue(I.getArgOperand(0));
7504   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
7505   return true;
7506 }
7507 
7508 /// See if we can lower a binary floating-point operation into an SDNode with
7509 /// the specified Opcode. If so, return true and lower it. Otherwise return
7510 /// false, and it will be lowered like a normal call.
7511 /// The caller already checked that \p I calls the appropriate LibFunc with a
7512 /// correct prototype.
7513 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
7514                                                unsigned Opcode) {
7515   // We already checked this call's prototype; verify it doesn't modify errno.
7516   if (!I.onlyReadsMemory())
7517     return false;
7518 
7519   SDValue Tmp0 = getValue(I.getArgOperand(0));
7520   SDValue Tmp1 = getValue(I.getArgOperand(1));
7521   EVT VT = Tmp0.getValueType();
7522   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
7523   return true;
7524 }
7525 
7526 void SelectionDAGBuilder::visitCall(const CallInst &I) {
7527   // Handle inline assembly differently.
7528   if (I.isInlineAsm()) {
7529     visitInlineAsm(I);
7530     return;
7531   }
7532 
7533   if (Function *F = I.getCalledFunction()) {
7534     if (F->isDeclaration()) {
7535       // Is this an LLVM intrinsic or a target-specific intrinsic?
7536       unsigned IID = F->getIntrinsicID();
7537       if (!IID)
7538         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
7539           IID = II->getIntrinsicID(F);
7540 
7541       if (IID) {
7542         visitIntrinsicCall(I, IID);
7543         return;
7544       }
7545     }
7546 
7547     // Check for well-known libc/libm calls.  If the function is internal, it
7548     // can't be a library call.  Don't do the check if marked as nobuiltin for
7549     // some reason or the call site requires strict floating point semantics.
7550     LibFunc Func;
7551     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
7552         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
7553         LibInfo->hasOptimizedCodeGen(Func)) {
7554       switch (Func) {
7555       default: break;
7556       case LibFunc_copysign:
7557       case LibFunc_copysignf:
7558       case LibFunc_copysignl:
7559         // We already checked this call's prototype; verify it doesn't modify
7560         // errno.
7561         if (I.onlyReadsMemory()) {
7562           SDValue LHS = getValue(I.getArgOperand(0));
7563           SDValue RHS = getValue(I.getArgOperand(1));
7564           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
7565                                    LHS.getValueType(), LHS, RHS));
7566           return;
7567         }
7568         break;
7569       case LibFunc_fabs:
7570       case LibFunc_fabsf:
7571       case LibFunc_fabsl:
7572         if (visitUnaryFloatCall(I, ISD::FABS))
7573           return;
7574         break;
7575       case LibFunc_fmin:
7576       case LibFunc_fminf:
7577       case LibFunc_fminl:
7578         if (visitBinaryFloatCall(I, ISD::FMINNUM))
7579           return;
7580         break;
7581       case LibFunc_fmax:
7582       case LibFunc_fmaxf:
7583       case LibFunc_fmaxl:
7584         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
7585           return;
7586         break;
7587       case LibFunc_sin:
7588       case LibFunc_sinf:
7589       case LibFunc_sinl:
7590         if (visitUnaryFloatCall(I, ISD::FSIN))
7591           return;
7592         break;
7593       case LibFunc_cos:
7594       case LibFunc_cosf:
7595       case LibFunc_cosl:
7596         if (visitUnaryFloatCall(I, ISD::FCOS))
7597           return;
7598         break;
7599       case LibFunc_sqrt:
7600       case LibFunc_sqrtf:
7601       case LibFunc_sqrtl:
7602       case LibFunc_sqrt_finite:
7603       case LibFunc_sqrtf_finite:
7604       case LibFunc_sqrtl_finite:
7605         if (visitUnaryFloatCall(I, ISD::FSQRT))
7606           return;
7607         break;
7608       case LibFunc_floor:
7609       case LibFunc_floorf:
7610       case LibFunc_floorl:
7611         if (visitUnaryFloatCall(I, ISD::FFLOOR))
7612           return;
7613         break;
7614       case LibFunc_nearbyint:
7615       case LibFunc_nearbyintf:
7616       case LibFunc_nearbyintl:
7617         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
7618           return;
7619         break;
7620       case LibFunc_ceil:
7621       case LibFunc_ceilf:
7622       case LibFunc_ceill:
7623         if (visitUnaryFloatCall(I, ISD::FCEIL))
7624           return;
7625         break;
7626       case LibFunc_rint:
7627       case LibFunc_rintf:
7628       case LibFunc_rintl:
7629         if (visitUnaryFloatCall(I, ISD::FRINT))
7630           return;
7631         break;
7632       case LibFunc_round:
7633       case LibFunc_roundf:
7634       case LibFunc_roundl:
7635         if (visitUnaryFloatCall(I, ISD::FROUND))
7636           return;
7637         break;
7638       case LibFunc_trunc:
7639       case LibFunc_truncf:
7640       case LibFunc_truncl:
7641         if (visitUnaryFloatCall(I, ISD::FTRUNC))
7642           return;
7643         break;
7644       case LibFunc_log2:
7645       case LibFunc_log2f:
7646       case LibFunc_log2l:
7647         if (visitUnaryFloatCall(I, ISD::FLOG2))
7648           return;
7649         break;
7650       case LibFunc_exp2:
7651       case LibFunc_exp2f:
7652       case LibFunc_exp2l:
7653         if (visitUnaryFloatCall(I, ISD::FEXP2))
7654           return;
7655         break;
7656       case LibFunc_memcmp:
7657         if (visitMemCmpCall(I))
7658           return;
7659         break;
7660       case LibFunc_mempcpy:
7661         if (visitMemPCpyCall(I))
7662           return;
7663         break;
7664       case LibFunc_memchr:
7665         if (visitMemChrCall(I))
7666           return;
7667         break;
7668       case LibFunc_strcpy:
7669         if (visitStrCpyCall(I, false))
7670           return;
7671         break;
7672       case LibFunc_stpcpy:
7673         if (visitStrCpyCall(I, true))
7674           return;
7675         break;
7676       case LibFunc_strcmp:
7677         if (visitStrCmpCall(I))
7678           return;
7679         break;
7680       case LibFunc_strlen:
7681         if (visitStrLenCall(I))
7682           return;
7683         break;
7684       case LibFunc_strnlen:
7685         if (visitStrNLenCall(I))
7686           return;
7687         break;
7688       }
7689     }
7690   }
7691 
7692   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
7693   // have to do anything here to lower funclet bundles.
7694   // CFGuardTarget bundles are lowered in LowerCallTo.
7695   assert(!I.hasOperandBundlesOtherThan(
7696              {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
7697               LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated}) &&
7698          "Cannot lower calls with arbitrary operand bundles!");
7699 
7700   SDValue Callee = getValue(I.getCalledOperand());
7701 
7702   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
7703     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
7704   else
7705     // Check if we can potentially perform a tail call. More detailed checking
7706     // is be done within LowerCallTo, after more information about the call is
7707     // is done within LowerCallTo, after more information about the call is
7708     LowerCallTo(I, Callee, I.isTailCall());
7709 }
7710 
7711 namespace {
7712 
7713 /// AsmOperandInfo - This contains information for each constraint that we are
7714 /// lowering.
7715 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
7716 public:
7717   /// CallOperand - If this is the result output operand or a clobber
7718   /// this is null, otherwise it is the incoming operand to the CallInst.
7719   /// This gets modified as the asm is processed.
7720   SDValue CallOperand;
7721 
7722   /// AssignedRegs - If this is a register or register class operand, this
7723   /// contains the set of registers corresponding to the operand.
7724   RegsForValue AssignedRegs;
7725 
7726   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
7727     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
7728   }
7729 
7730   /// Whether or not this operand accesses memory.
7731   bool hasMemory(const TargetLowering &TLI) const {
7732     // Indirect operand accesses access memory.
7733     if (isIndirect)
7734       return true;
7735 
7736     for (const auto &Code : Codes)
7737       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
7738         return true;
7739 
7740     return false;
7741   }
7742 
7743   /// getCallOperandValEVT - Return the EVT of the Value* that this operand
7744   /// corresponds to.  If there is no Value* for this operand, it returns
7745   /// MVT::Other.
7746   EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
7747                            const DataLayout &DL) const {
7748     if (!CallOperandVal) return MVT::Other;
7749 
7750     if (isa<BasicBlock>(CallOperandVal))
7751       return TLI.getProgramPointerTy(DL);
7752 
7753     llvm::Type *OpTy = CallOperandVal->getType();
7754 
7755     // FIXME: code duplicated from TargetLowering::ParseConstraints().
7756     // If this is an indirect operand, the operand is a pointer to the
7757     // accessed type.
7758     if (isIndirect) {
7759       PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
7760       if (!PtrTy)
7761         report_fatal_error("Indirect operand for inline asm not a pointer!");
7762       OpTy = PtrTy->getElementType();
7763     }
7764 
7765     // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
7766     if (StructType *STy = dyn_cast<StructType>(OpTy))
7767       if (STy->getNumElements() == 1)
7768         OpTy = STy->getElementType(0);
7769 
7770     // If OpTy is not a single value, it may be a struct/union that we
7771     // can tile with integers.
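    // For example, a 32-bit aggregate such as { i16, i16 } is treated here as
    // a single i32 operand.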
7772     if (!OpTy->isSingleValueType() && OpTy->isSized()) {
7773       unsigned BitSize = DL.getTypeSizeInBits(OpTy);
7774       switch (BitSize) {
7775       default: break;
7776       case 1:
7777       case 8:
7778       case 16:
7779       case 32:
7780       case 64:
7781       case 128:
7782         OpTy = IntegerType::get(Context, BitSize);
7783         break;
7784       }
7785     }
7786 
7787     return TLI.getValueType(DL, OpTy, true);
7788   }
7789 };
7790 
7791 using SDISelAsmOperandInfoVector = SmallVector<SDISelAsmOperandInfo, 16>;
7792 
7793 } // end anonymous namespace
7794 
7795 /// Make sure that the output operand \p OpInfo and its corresponding input
7796 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
7797 /// out).
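///
/// For example, if the tied operands were parsed with different ConstraintVTs
/// but map to the same register class (and agree on integer-ness), the input's
/// ConstraintVT is simply overwritten with the output's; otherwise a fatal
/// error is reported.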
7798 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
7799                                SDISelAsmOperandInfo &MatchingOpInfo,
7800                                SelectionDAG &DAG) {
7801   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
7802     return;
7803 
7804   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
7805   const auto &TLI = DAG.getTargetLoweringInfo();
7806 
7807   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
7808       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
7809                                        OpInfo.ConstraintVT);
7810   std::pair<unsigned, const TargetRegisterClass *> InputRC =
7811       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
7812                                        MatchingOpInfo.ConstraintVT);
7813   if ((OpInfo.ConstraintVT.isInteger() !=
7814        MatchingOpInfo.ConstraintVT.isInteger()) ||
7815       (MatchRC.second != InputRC.second)) {
7816     // FIXME: error out in a more elegant fashion
7817     report_fatal_error("Unsupported asm: input constraint"
7818                        " with a matching output constraint of"
7819                        " incompatible type!");
7820   }
7821   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
7822 }
7823 
7824 /// Get a direct memory input to behave well as an indirect operand.
7825 /// This may introduce stores, hence the need for a \p Chain.
7826 /// \return The (possibly updated) chain.
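///
/// For example, a direct "m" input fed a ConstantInt is given a constant-pool
/// address, while a non-constant value is spilled to a fresh stack slot (via a
/// store chained on \p Chain) and the slot's address is used instead.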
7827 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
7828                                         SDISelAsmOperandInfo &OpInfo,
7829                                         SelectionDAG &DAG) {
7830   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7831 
7832   // If we don't have an indirect input, put it in the constpool if we can,
7833   // otherwise spill it to a stack slot.
7834   // TODO: This isn't quite right. We need to handle these according to
7835   // the addressing mode that the constraint wants. Also, this may take
7836   // an additional register for the computation and we don't want that
7837   // either.
7838 
7839   // If the operand is a float, integer, or vector constant, spill to a
7840   // constant pool entry to get its address.
7841   const Value *OpVal = OpInfo.CallOperandVal;
7842   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
7843       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
7844     OpInfo.CallOperand = DAG.getConstantPool(
7845         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
7846     return Chain;
7847   }
7848 
7849   // Otherwise, create a stack slot and emit a store to it before the asm.
7850   Type *Ty = OpVal->getType();
7851   auto &DL = DAG.getDataLayout();
7852   uint64_t TySize = DL.getTypeAllocSize(Ty);
7853   unsigned Align = DL.getPrefTypeAlignment(Ty);
7854   MachineFunction &MF = DAG.getMachineFunction();
7855   int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
7856   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
7857   Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
7858                             MachinePointerInfo::getFixedStack(MF, SSFI),
7859                             TLI.getMemValueType(DL, Ty));
7860   OpInfo.CallOperand = StackSlot;
7861 
7862   return Chain;
7863 }
7864 
7865 /// GetRegistersForValue - Assign registers (virtual or physical) for the
7866 /// specified operand.  We prefer to assign virtual registers, to allow the
7867 /// register allocator to handle the assignment process.  However, if the asm
7868 /// uses features that we can't model on machineinstrs, we have SDISel do the
7869 /// allocation.  This produces generally horrible, but correct, code.
7870 ///
7871 ///   OpInfo describes the operand
7872 ///   RefOpInfo describes the matching operand if any, the operand otherwise
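///
///   For example (illustrative, target-dependent): on x86 a "{ax}" constraint
///   pins the operand to the AX physical register, whereas a plain "r"
///   constraint only names a register class, so a virtual register is created
///   and left for the register allocator to assign.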
7873 static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
7874                                  SDISelAsmOperandInfo &OpInfo,
7875                                  SDISelAsmOperandInfo &RefOpInfo) {
7876   LLVMContext &Context = *DAG.getContext();
7877   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7878 
7879   MachineFunction &MF = DAG.getMachineFunction();
7880   SmallVector<unsigned, 4> Regs;
7881   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7882 
7883   // No work to do for memory operations.
7884   if (OpInfo.ConstraintType == TargetLowering::C_Memory)
7885     return;
7886 
7887   // If this is a constraint for a single physreg, or a constraint for a
7888   // register class, find it.
7889   unsigned AssignedReg;
7890   const TargetRegisterClass *RC;
7891   std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
7892       &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
7893   // RC is unset only on failure. Return immediately.
7894   if (!RC)
7895     return;
7896 
7897   // Get the actual register value type.  This is important, because the user
7898   // may have asked for (e.g.) the AX register in i32 type.  We need to
7899   // remember that AX is actually i16 to get the right extension.
7900   const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
7901 
7902   if (OpInfo.ConstraintVT != MVT::Other) {
7903     // If this is an FP operand in an integer register (or vice versa), or more
7904     // generally if the operand value disagrees with the register class we plan
7905     // to stick it in, fix the operand type.
7906     //
7907     // If this is an input value, the bitcast to the new type is done now.
7908     // Bitcast for output value is done at the end of visitInlineAsm().
7909     if ((OpInfo.Type == InlineAsm::isOutput ||
7910          OpInfo.Type == InlineAsm::isInput) &&
7911         !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
7912       // Try to convert to the first EVT that the reg class contains.  If the
7913       // types are identical size, use a bitcast to convert (e.g. two differing
7914       // vector types).  Note: output bitcast is done at the end of
7915       // visitInlineAsm().
7916       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
7917         // Exclude indirect inputs while they are unsupported because the code
7918         // to perform the load is missing and thus OpInfo.CallOperand still
7919         // refers to the input address rather than the pointed-to value.
7920         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
7921           OpInfo.CallOperand =
7922               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
7923         OpInfo.ConstraintVT = RegVT;
7924         // If the operand is an FP value and we want it in integer registers,
7925         // use the corresponding integer type. This turns an f64 value into
7926         // i64, which can be passed with two i32 values on a 32-bit machine.
7927       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
7928         MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
7929         if (OpInfo.Type == InlineAsm::isInput)
7930           OpInfo.CallOperand =
7931               DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
7932         OpInfo.ConstraintVT = VT;
7933       }
7934     }
7935   }
7936 
7937   // No need to allocate a matching input constraint since the constraint it's
7938   // matching to has already been allocated.
7939   if (OpInfo.isMatchingInputConstraint())
7940     return;
7941 
7942   EVT ValueVT = OpInfo.ConstraintVT;
7943   if (OpInfo.ConstraintVT == MVT::Other)
7944     ValueVT = RegVT;
7945 
7946   // Initialize NumRegs.
7947   unsigned NumRegs = 1;
7948   if (OpInfo.ConstraintVT != MVT::Other)
7949     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
7950 
7951   // If this is a constraint for a specific physical register, like {r17},
7952   // assign it now.
7953 
7954   // If this is associated with a specific register, initialize the iterator to
7955   // the correct place. If virtual, make sure we have enough registers.
7956 
7957   // Initialize iterator if necessary
7958   TargetRegisterClass::iterator I = RC->begin();
7959   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7960 
7961   // Do not check for single registers.
7962   if (AssignedReg) {
7963     for (; *I != AssignedReg; ++I)
7964       assert(I != RC->end() && "AssignedReg should be member of RC");
7965   }
7966 
7967   for (; NumRegs; --NumRegs, ++I) {
7968     assert(I != RC->end() && "Ran out of registers to allocate!");
7969     Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
7970     Regs.push_back(R);
7971   }
7972 
7973   OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
7974 }
7975 
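/// Return the index of the flag-word operand group, on an in-construction
/// INLINEASM node, that corresponds to operand number \p OperandNo. Groups
/// start at InlineAsm::Op_FirstOperand, and each group consists of a flag word
/// followed by InlineAsm::getNumOperandRegisters(flag) operands.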
7976 static unsigned
7977 findMatchingInlineAsmOperand(unsigned OperandNo,
7978                              const std::vector<SDValue> &AsmNodeOperands) {
7979   // Scan until we find the definition we already emitted of this operand.
7980   unsigned CurOp = InlineAsm::Op_FirstOperand;
7981   for (; OperandNo; --OperandNo) {
7982     // Advance to the next operand.
7983     unsigned OpFlag =
7984         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7985     assert((InlineAsm::isRegDefKind(OpFlag) ||
7986             InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
7987             InlineAsm::isMemKind(OpFlag)) &&
7988            "Skipped past definitions?");
7989     CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
7990   }
7991   return CurOp;
7992 }
7993 
7994 namespace {
7995 
7996 class ExtraFlags {
7997   unsigned Flags = 0;
7998 
7999 public:
8000   explicit ExtraFlags(const CallBase &Call) {
8001     const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
8002     if (IA->hasSideEffects())
8003       Flags |= InlineAsm::Extra_HasSideEffects;
8004     if (IA->isAlignStack())
8005       Flags |= InlineAsm::Extra_IsAlignStack;
8006     if (Call.isConvergent())
8007       Flags |= InlineAsm::Extra_IsConvergent;
8008     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
8009   }
8010 
8011   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
8012     // Ideally, we would only check against memory constraints.  However, the
8013     // meaning of an Other constraint can be target-specific and we can't easily
8014     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
8015     // for Other constraints as well.
8016     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
8017         OpInfo.ConstraintType == TargetLowering::C_Other) {
8018       if (OpInfo.Type == InlineAsm::isInput)
8019         Flags |= InlineAsm::Extra_MayLoad;
8020       else if (OpInfo.Type == InlineAsm::isOutput)
8021         Flags |= InlineAsm::Extra_MayStore;
8022       else if (OpInfo.Type == InlineAsm::isClobber)
8023         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
8024     }
8025   }
8026 
8027   unsigned get() const { return Flags; }
8028 };
8029 
8030 } // end anonymous namespace
8031 
8032 /// visitInlineAsm - Handle a call to an InlineAsm object.
8033 void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
8034   const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
8035 
8036   /// ConstraintOperands - Information about all of the constraints.
8037   SDISelAsmOperandInfoVector ConstraintOperands;
8038 
8039   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8040   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
8041       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
8042 
8043   // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
8044   // AsmDialect, MayLoad, MayStore).
8045   bool HasSideEffect = IA->hasSideEffects();
8046   ExtraFlags ExtraInfo(Call);
8047 
8048   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
8049   unsigned ResNo = 0;   // ResNo - The result number of the next output.
8050   unsigned NumMatchingOps = 0;
8051   for (auto &T : TargetConstraints) {
8052     ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
8053     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
8054 
8055     // Compute the value type for each operand.
8056     if (OpInfo.Type == InlineAsm::isInput ||
8057         (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
8058       OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++);
8059 
8060       // Process the call argument. BasicBlocks are labels, currently appearing
8061       // only in asm's.
8062       // only in inline asm.
8063           ArgNo - 1 >= (cast<CallBrInst>(&Call)->getNumArgOperands() -
8064                         cast<CallBrInst>(&Call)->getNumIndirectDests() -
8065                         NumMatchingOps) &&
8066           (NumMatchingOps == 0 ||
8067            ArgNo - 1 < (cast<CallBrInst>(&Call)->getNumArgOperands() -
8068                         NumMatchingOps))) {
8069         const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
8070         EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
8071         OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
8072       } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
8073         OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
8074       } else {
8075         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
8076       }
8077 
8078       OpInfo.ConstraintVT =
8079           OpInfo
8080               .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
8081               .getSimpleVT();
8082     } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
8083       // The return value of the call is this value.  As such, there is no
8084       // corresponding argument.
8085       assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
8086       if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
8087         OpInfo.ConstraintVT = TLI.getSimpleValueType(
8088             DAG.getDataLayout(), STy->getElementType(ResNo));
8089       } else {
8090         assert(ResNo == 0 && "Asm only has one result!");
8091         OpInfo.ConstraintVT =
8092             TLI.getSimpleValueType(DAG.getDataLayout(), Call.getType());
8093       }
8094       ++ResNo;
8095     } else {
8096       OpInfo.ConstraintVT = MVT::Other;
8097     }
8098 
8099     if (OpInfo.hasMatchingInput())
8100       ++NumMatchingOps;
8101 
8102     if (!HasSideEffect)
8103       HasSideEffect = OpInfo.hasMemory(TLI);
8104 
8105     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
8106     // FIXME: Could we compute this on OpInfo rather than T?
8107 
8108     // Compute the constraint code and ConstraintType to use.
8109     TLI.ComputeConstraintToUse(T, SDValue());
8110 
8111     if (T.ConstraintType == TargetLowering::C_Immediate &&
8112         OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
8113       // We've delayed emitting a diagnostic like the "n" constraint because
8114       // inlining could cause an integer to show up.
8115       return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
8116                                           "' expects an integer constant "
8117                                           "expression");
8118 
8119     ExtraInfo.update(T);
8120   }
8121 
8122 
8123   // We won't need to flush pending loads if this asm doesn't touch
8124   // memory and is nonvolatile.
8125   SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
8126 
8127   bool IsCallBr = isa<CallBrInst>(Call);
8128   if (IsCallBr) {
8129     // If this is a callbr we need to flush pending exports since inlineasm_br
8130     // is a terminator. We need to do this before nodes are glued to
8131     // the inlineasm_br node.
8132     Chain = getControlRoot();
8133   }
8134 
8135   // Second pass over the constraints: compute which constraint option to use.
8136   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8137     // If this is an output operand with a matching input operand, look up the
8138     // matching input. If their types mismatch, e.g. one is an integer, the
8139     // other is floating point, or their sizes are different, flag it as an
8140     // error.
8141     if (OpInfo.hasMatchingInput()) {
8142       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
8143       patchMatchingInput(OpInfo, Input, DAG);
8144     }
8145 
8146     // Compute the constraint code and ConstraintType to use.
8147     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
8148 
8149     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8150         OpInfo.Type == InlineAsm::isClobber)
8151       continue;
8152 
8153     // If this is a memory input, and if the operand is not indirect, do what we
8154     // need to provide an address for the memory input.
8155     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8156         !OpInfo.isIndirect) {
8157       assert((OpInfo.isMultipleAlternative ||
8158               (OpInfo.Type == InlineAsm::isInput)) &&
8159              "Can only indirectify direct input operands!");
8160 
8161       // Memory operands really want the address of the value.
8162       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
8163 
8164       // There is no longer a Value* corresponding to this operand.
8165       OpInfo.CallOperandVal = nullptr;
8166 
8167       // It is now an indirect operand.
8168       OpInfo.isIndirect = true;
8169     }
8170 
8171   }
8172 
8173   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
8174   std::vector<SDValue> AsmNodeOperands;
8175   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
8176   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
8177       IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
8178 
8179   // If we have a !srcloc metadata node associated with it, we want to attach
8180   // this to the ultimately generated inline asm machineinstr.  To do this, we
8181   // pass in the third operand as this (potentially null) inline asm MDNode.
8182   const MDNode *SrcLoc = Call.getMetadata("srcloc");
8183   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
8184 
8185   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
8186   // bits as operand 3.
8187   AsmNodeOperands.push_back(DAG.getTargetConstant(
8188       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8189 
8190   // Third pass: Loop over operands to prepare DAG-level operands. As part of
8191   // this, assign virtual and physical registers for inputs and outputs.
8192   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8193     // Assign Registers.
8194     SDISelAsmOperandInfo &RefOpInfo =
8195         OpInfo.isMatchingInputConstraint()
8196             ? ConstraintOperands[OpInfo.getMatchedOperand()]
8197             : OpInfo;
8198     GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
8199 
8200     auto DetectWriteToReservedRegister = [&]() {
8201       const MachineFunction &MF = DAG.getMachineFunction();
8202       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8203       for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
8204         if (Register::isPhysicalRegister(Reg) &&
8205             TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
8206           const char *RegName = TRI.getName(Reg);
8207           emitInlineAsmError(Call, "write to reserved register '" +
8208                                        Twine(RegName) + "'");
8209           return true;
8210         }
8211       }
8212       return false;
8213     };
8214 
8215     switch (OpInfo.Type) {
8216     case InlineAsm::isOutput:
8217       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8218         unsigned ConstraintID =
8219             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8220         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8221                "Failed to convert memory constraint code to constraint id.");
8222 
8223         // Add information to the INLINEASM node to know about this output.
8224         unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8225         OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
8226         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
8227                                                         MVT::i32));
8228         AsmNodeOperands.push_back(OpInfo.CallOperand);
8229       } else {
8230         // Otherwise, this outputs to a register (directly for C_Register /
8231         // C_RegisterClass, and a target-defined fashion for
8232         // C_Immediate/C_Other). Find a register that we can use.
8233         if (OpInfo.AssignedRegs.Regs.empty()) {
8234           emitInlineAsmError(
8235               Call, "couldn't allocate output register for constraint '" +
8236                         Twine(OpInfo.ConstraintCode) + "'");
8237           return;
8238         }
8239 
8240         if (DetectWriteToReservedRegister())
8241           return;
8242 
8243         // Add information to the INLINEASM node to know that this register is
8244         // set.
8245         OpInfo.AssignedRegs.AddInlineAsmOperands(
8246             OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
8247                                   : InlineAsm::Kind_RegDef,
8248             false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
8249       }
8250       break;
8251 
8252     case InlineAsm::isInput: {
8253       SDValue InOperandVal = OpInfo.CallOperand;
8254 
8255       if (OpInfo.isMatchingInputConstraint()) {
8256         // If this is required to match an output register we have already set,
8257         // just use its register.
8258         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
8259                                                   AsmNodeOperands);
8260         unsigned OpFlag =
8261           cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
8262         if (InlineAsm::isRegDefKind(OpFlag) ||
8263             InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
8264           // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
8265           if (OpInfo.isIndirect) {
8266             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
8267             emitInlineAsmError(Call, "inline asm not supported yet: "
8268                                      "don't know how to handle tied "
8269                                      "indirect register inputs");
8270             return;
8271           }
8272 
8273           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
8274           SmallVector<unsigned, 4> Regs;
8275 
8276           if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
8277             unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
8278             MachineRegisterInfo &RegInfo =
8279                 DAG.getMachineFunction().getRegInfo();
8280             for (unsigned i = 0; i != NumRegs; ++i)
8281               Regs.push_back(RegInfo.createVirtualRegister(RC));
8282           } else {
8283             emitInlineAsmError(Call,
8284                                "inline asm error: This value type register "
8285                                "class is not natively supported!");
8286             return;
8287           }
8288 
8289           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
8290 
8291           SDLoc dl = getCurSDLoc();
8292           // Use the produced MatchedRegs object to copy the input value into the registers.
8293           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag, &Call);
8294           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
8295                                            true, OpInfo.getMatchedOperand(), dl,
8296                                            DAG, AsmNodeOperands);
8297           break;
8298         }
8299 
8300         assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
8301         assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
8302                "Unexpected number of operands");
8303         // Add information to the INLINEASM node to know about this input.
8304         // See InlineAsm.h isUseOperandTiedToDef.
8305         OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
8306         OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
8307                                                     OpInfo.getMatchedOperand());
8308         AsmNodeOperands.push_back(DAG.getTargetConstant(
8309             OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8310         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
8311         break;
8312       }
8313 
8314       // Treat indirect 'X' constraint as memory.
8315       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
8316           OpInfo.isIndirect)
8317         OpInfo.ConstraintType = TargetLowering::C_Memory;
8318 
8319       if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
8320           OpInfo.ConstraintType == TargetLowering::C_Other) {
8321         std::vector<SDValue> Ops;
8322         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
8323                                           Ops, DAG);
8324         if (Ops.empty()) {
8325           if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
8326             if (isa<ConstantSDNode>(InOperandVal)) {
8327               emitInlineAsmError(Call, "value out of range for constraint '" +
8328                                            Twine(OpInfo.ConstraintCode) + "'");
8329               return;
8330             }
8331 
8332           emitInlineAsmError(Call,
8333                              "invalid operand for inline asm constraint '" +
8334                                  Twine(OpInfo.ConstraintCode) + "'");
8335           return;
8336         }
8337 
8338         // Add information to the INLINEASM node to know about this input.
8339         unsigned ResOpType =
8340           InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
8341         AsmNodeOperands.push_back(DAG.getTargetConstant(
8342             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8343         AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
8344         break;
8345       }
8346 
8347       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8348         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
8349         assert(InOperandVal.getValueType() ==
8350                    TLI.getPointerTy(DAG.getDataLayout()) &&
8351                "Memory operands expect pointer values");
8352 
8353         unsigned ConstraintID =
8354             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8355         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8356                "Failed to convert memory constraint code to constraint id.");
8357 
8358         // Add information to the INLINEASM node to know about this input.
8359         unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8360         ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
8361         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
8362                                                         getCurSDLoc(),
8363                                                         MVT::i32));
8364         AsmNodeOperands.push_back(InOperandVal);
8365         break;
8366       }
8367 
8368       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
8369               OpInfo.ConstraintType == TargetLowering::C_Register) &&
8370              "Unknown constraint type!");
8371 
8372       // TODO: Support this.
8373       if (OpInfo.isIndirect) {
8374         emitInlineAsmError(
8375             Call, "Don't know how to handle indirect register inputs yet "
8376                   "for constraint '" +
8377                       Twine(OpInfo.ConstraintCode) + "'");
8378         return;
8379       }
8380 
8381       // Copy the input into the appropriate registers.
8382       if (OpInfo.AssignedRegs.Regs.empty()) {
8383         emitInlineAsmError(Call,
8384                            "couldn't allocate input reg for constraint '" +
8385                                Twine(OpInfo.ConstraintCode) + "'");
8386         return;
8387       }
8388 
8389       if (DetectWriteToReservedRegister())
8390         return;
8391 
8392       SDLoc dl = getCurSDLoc();
8393 
8394       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
8395                                         &Call);
8396 
8397       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
8398                                                dl, DAG, AsmNodeOperands);
8399       break;
8400     }
8401     case InlineAsm::isClobber:
8402       // Add the clobbered value to the operand list, so that the register
8403       // allocator is aware that the physreg got clobbered.
8404       if (!OpInfo.AssignedRegs.Regs.empty())
8405         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
8406                                                  false, 0, getCurSDLoc(), DAG,
8407                                                  AsmNodeOperands);
8408       break;
8409     }
8410   }
8411 
8412   // Finish up input operands.  Set the input chain and add the flag last.
8413   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
8414   if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
8415 
8416   unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
8417   Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
8418                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
8419   Flag = Chain.getValue(1);
8420 
8421   // Do additional work to generate outputs.
8422 
8423   SmallVector<EVT, 1> ResultVTs;
8424   SmallVector<SDValue, 1> ResultValues;
8425   SmallVector<SDValue, 8> OutChains;
8426 
8427   llvm::Type *CallResultType = Call.getType();
8428   ArrayRef<Type *> ResultTypes;
8429   if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
8430     ResultTypes = StructResult->elements();
8431   else if (!CallResultType->isVoidTy())
8432     ResultTypes = makeArrayRef(CallResultType);
8433 
8434   auto CurResultType = ResultTypes.begin();
8435   auto handleRegAssign = [&](SDValue V) {
8436     assert(CurResultType != ResultTypes.end() && "Unexpected value");
8437     assert((*CurResultType)->isSized() && "Unexpected unsized type");
8438     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
8439     ++CurResultType;
8440     // If the type of the inline asm call site return value differs from, but has
8441     // the same size as, the type of the asm output, bitcast it.  One example of this
8442     // is for vectors with different width / number of elements.  This can
8443     // happen for register classes that can contain multiple different value
8444     // types.  The preg or vreg allocated may not have the same VT as was
8445     // expected.
8446     //
8447     // This can also happen for a return value that disagrees with the register
8448     // class it is put in, e.g. a double in a general-purpose register on a
8449     // 32-bit machine.
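    // For example (illustrative): an output constrained to a 128-bit vector
    // register class may come back as a v4i32 while the call site expects a
    // v8i16; the sizes match, so a single BITCAST fixes it up.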
8450     if (ResultVT != V.getValueType() &&
8451         ResultVT.getSizeInBits() == V.getValueSizeInBits())
8452       V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
8453     else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
8454              V.getValueType().isInteger()) {
8455       // If a result value was tied to an input value, the computed result
8456       // may have a wider width than the expected result.  Extract the
8457       // relevant portion.
8458       V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
8459     }
8460     assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
8461     ResultVTs.push_back(ResultVT);
8462     ResultValues.push_back(V);
8463   };
8464 
8465   // Deal with output operands.
8466   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8467     if (OpInfo.Type == InlineAsm::isOutput) {
8468       SDValue Val;
8469       // Skip trivial output operands.
8470       if (OpInfo.AssignedRegs.Regs.empty())
8471         continue;
8472 
8473       switch (OpInfo.ConstraintType) {
8474       case TargetLowering::C_Register:
8475       case TargetLowering::C_RegisterClass:
8476         Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
8477                                                   Chain, &Flag, &Call);
8478         break;
8479       case TargetLowering::C_Immediate:
8480       case TargetLowering::C_Other:
8481         Val = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
8482                                               OpInfo, DAG);
8483         break;
8484       case TargetLowering::C_Memory:
8485         break; // Already handled.
8486       case TargetLowering::C_Unknown:
8487         assert(false && "Unexpected unknown constraint");
8488       }
8489 
8490       // Indirect output manifest as stores. Record output chains.
8491       if (OpInfo.isIndirect) {
8492         const Value *Ptr = OpInfo.CallOperandVal;
8493         assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
8494         SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
8495                                      MachinePointerInfo(Ptr));
8496         OutChains.push_back(Store);
8497       } else {
8498         // generate CopyFromRegs to associated registers.
8499         assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
8500         if (Val.getOpcode() == ISD::MERGE_VALUES) {
8501           for (const SDValue &V : Val->op_values())
8502             handleRegAssign(V);
8503         } else
8504           handleRegAssign(Val);
8505       }
8506     }
8507   }
8508 
8509   // Set results.
8510   if (!ResultValues.empty()) {
8511     assert(CurResultType == ResultTypes.end() &&
8512            "Mismatch in number of ResultTypes");
8513     assert(ResultValues.size() == ResultTypes.size() &&
8514            "Mismatch in number of output operands in asm result");
8515 
8516     SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
8517                             DAG.getVTList(ResultVTs), ResultValues);
8518     setValue(&Call, V);
8519   }
8520 
8521   // Collect store chains.
8522   if (!OutChains.empty())
8523     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
8524 
8525   // Only update the root if the inline assembly has a memory effect.
8526   if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr)
8527     DAG.setRoot(Chain);
8528 }
8529 
8530 void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
8531                                              const Twine &Message) {
8532   LLVMContext &Ctx = *DAG.getContext();
8533   Ctx.emitError(&Call, Message);
8534 
8535   // Make sure we leave the DAG in a valid state
8536   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8537   SmallVector<EVT, 1> ValueVTs;
8538   ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
8539 
8540   if (ValueVTs.empty())
8541     return;
8542 
8543   SmallVector<SDValue, 1> Ops;
8544   for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
8545     Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
8546 
8547   setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
8548 }
8549 
8550 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
8551   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
8552                           MVT::Other, getRoot(),
8553                           getValue(I.getArgOperand(0)),
8554                           DAG.getSrcValue(I.getArgOperand(0))));
8555 }
8556 
8557 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
8558   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8559   const DataLayout &DL = DAG.getDataLayout();
8560   SDValue V = DAG.getVAArg(
8561       TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
8562       getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
8563       DL.getABITypeAlignment(I.getType()));
8564   DAG.setRoot(V.getValue(1));
8565 
8566   if (I.getType()->isPointerTy())
8567     V = DAG.getPtrExtOrTrunc(
8568         V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
8569   setValue(&I, V);
8570 }
8571 
8572 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
8573   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
8574                           MVT::Other, getRoot(),
8575                           getValue(I.getArgOperand(0)),
8576                           DAG.getSrcValue(I.getArgOperand(0))));
8577 }
8578 
8579 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
8580   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
8581                           MVT::Other, getRoot(),
8582                           getValue(I.getArgOperand(0)),
8583                           getValue(I.getArgOperand(1)),
8584                           DAG.getSrcValue(I.getArgOperand(0)),
8585                           DAG.getSrcValue(I.getArgOperand(1))));
8586 }
8587 
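/// If \p I carries !range metadata whose lower bound is zero, wrap \p Op in an
/// AssertZext of the narrowest integer type that still covers the range. For
/// example, !range !{i64 0, i64 256} produces an AssertZext asserting that the
/// value fits in an i8.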
8588 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
8589                                                     const Instruction &I,
8590                                                     SDValue Op) {
8591   const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
8592   if (!Range)
8593     return Op;
8594 
8595   ConstantRange CR = getConstantRangeFromMetadata(*Range);
8596   if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
8597     return Op;
8598 
8599   APInt Lo = CR.getUnsignedMin();
8600   if (!Lo.isMinValue())
8601     return Op;
8602 
8603   APInt Hi = CR.getUnsignedMax();
8604   unsigned Bits = std::max(Hi.getActiveBits(),
8605                            static_cast<unsigned>(IntegerType::MIN_INT_BITS));
8606 
8607   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
8608 
8609   SDLoc SL = getCurSDLoc();
8610 
8611   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
8612                              DAG.getValueType(SmallVT));
8613   unsigned NumVals = Op.getNode()->getNumValues();
8614   if (NumVals == 1)
8615     return ZExt;
8616 
8617   SmallVector<SDValue, 4> Ops;
8618 
8619   Ops.push_back(ZExt);
8620   for (unsigned I = 1; I != NumVals; ++I)
8621     Ops.push_back(Op.getValue(I));
8622 
8623   return DAG.getMergeValues(Ops, SL);
8624 }
8625 
8626 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
8627 /// the call being lowered.
8628 ///
8629 /// This is a helper for lowering intrinsics that follow a target calling
8630 /// convention or require stack pointer adjustment. Only a subset of the
8631 /// intrinsic's operands need to participate in the calling convention.
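///
/// For example, the patchpoint lowering below passes only the actual call
/// arguments through this helper, skipping the <id>, <numBytes>, <target> and
/// <numArgs> meta operands.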
8632 void SelectionDAGBuilder::populateCallLoweringInfo(
8633     TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
8634     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
8635     bool IsPatchPoint) {
8636   TargetLowering::ArgListTy Args;
8637   Args.reserve(NumArgs);
8638 
8639   // Populate the argument list.
8640   // Attributes for args start at offset 1, after the return attribute.
8641   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
8642        ArgI != ArgE; ++ArgI) {
8643     const Value *V = Call->getOperand(ArgI);
8644 
8645     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
8646 
8647     TargetLowering::ArgListEntry Entry;
8648     Entry.Node = getValue(V);
8649     Entry.Ty = V->getType();
8650     Entry.setAttributes(Call, ArgI);
8651     Args.push_back(Entry);
8652   }
8653 
8654   CLI.setDebugLoc(getCurSDLoc())
8655       .setChain(getRoot())
8656       .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args))
8657       .setDiscardResult(Call->use_empty())
8658       .setIsPatchPoint(IsPatchPoint)
8659       .setIsPreallocated(
8660           Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
8661 }
8662 
8663 /// Add a stack map intrinsic call's live variable operands to a stackmap
8664 /// or patchpoint target node's operand list.
8665 ///
8666 /// Constants are converted to TargetConstants purely as an optimization to
8667 /// avoid constant materialization and register allocation.
8668 ///
8669 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
8670 /// generate address computation nodes, and so FinalizeISel can convert the
8671 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
8672 /// address materialization and register allocation, but may also be required
8673 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
8674 /// alloca in the entry block, then the runtime may assume that the alloca's
8675 /// StackMap location can be read immediately after compilation and that the
8676 /// location is valid at any point during execution (this is similar to the
8677 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
8678 /// only available in a register, then the runtime would need to trap when
8679 /// execution reaches the StackMap in order to read the alloca's location.
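///
/// For example, given
///   call void @llvm.experimental.stackmap(i64 1, i32 0, i64 %v, i64 42)
/// the live operands are %v, which is forwarded as its SDValue, and 42, which
/// is emitted as a StackMaps::ConstantOp marker followed by a target constant;
/// an alloca in the entry block would instead become a TargetFrameIndex.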
8680 static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
8681                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
8682                                 SelectionDAGBuilder &Builder) {
8683   for (unsigned i = StartIdx, e = Call.arg_size(); i != e; ++i) {
8684     SDValue OpVal = Builder.getValue(Call.getArgOperand(i));
8685     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
8686       Ops.push_back(
8687         Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
8688       Ops.push_back(
8689         Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
8690     } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
8691       const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
8692       Ops.push_back(Builder.DAG.getTargetFrameIndex(
8693           FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
8694     } else
8695       Ops.push_back(OpVal);
8696   }
8697 }
8698 
8699 /// Lower llvm.experimental.stackmap directly to its target opcode.
8700 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
8701   // void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
8702   //                                  [live variables...])
8703 
8704   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
8705 
8706   SDValue Chain, InFlag, Callee, NullPtr;
8707   SmallVector<SDValue, 32> Ops;
8708 
8709   SDLoc DL = getCurSDLoc();
8710   Callee = getValue(CI.getCalledOperand());
8711   NullPtr = DAG.getIntPtrConstant(0, DL, true);
8712 
8713   // The stackmap intrinsic only records the live variables (the arguments
8714   // passed to it) and emits NOPS (if requested). Unlike the patchpoint
8715   // intrinsic, this won't be lowered to a function call. This means we don't
8716   // have to worry about calling conventions and target specific lowering code.
8717   // Instead we perform the call lowering right here.
8718   //
8719   // chain, flag = CALLSEQ_START(chain, 0, 0)
8720   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
8721   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
8722   //
8723   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
8724   InFlag = Chain.getValue(1);
8725 
8726   // Add the <id> and <numBytes> constants.
8727   SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
8728   Ops.push_back(DAG.getTargetConstant(
8729                   cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
8730   SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
8731   Ops.push_back(DAG.getTargetConstant(
8732                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
8733                   MVT::i32));
8734 
8735   // Push live variables for the stack map.
8736   addStackMapLiveVars(CI, 2, DL, Ops, *this);
8737 
8738   // We are not pushing any register mask info here on the operands list,
8739   // because the stackmap doesn't clobber anything.
8740 
8741   // Push the chain and the glue flag.
8742   Ops.push_back(Chain);
8743   Ops.push_back(InFlag);
8744 
8745   // Create the STACKMAP node.
8746   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8747   SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
8748   Chain = SDValue(SM, 0);
8749   InFlag = Chain.getValue(1);
8750 
8751   Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
8752 
8753   // Stackmaps don't generate values, so nothing goes into the NodeMap.
8754 
8755   // Set the root to the target-lowered call chain.
8756   DAG.setRoot(Chain);
8757 
8758   // Inform the Frame Information that we have a stackmap in this function.
8759   FuncInfo.MF->getFrameInfo().setHasStackMap();
8760 }
8761 
8762 /// Lower llvm.experimental.patchpoint directly to its target opcode.
8763 void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
8764                                           const BasicBlock *EHPadBB) {
8765   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
8766   //                                                 i32 <numBytes>,
8767   //                                                 i8* <target>,
8768   //                                                 i32 <numArgs>,
8769   //                                                 [Args...],
8770   //                                                 [live variables...])
8771 
8772   CallingConv::ID CC = CB.getCallingConv();
8773   bool IsAnyRegCC = CC == CallingConv::AnyReg;
8774   bool HasDef = !CB.getType()->isVoidTy();
8775   SDLoc dl = getCurSDLoc();
8776   SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
8777 
8778   // Handle immediate and symbolic callees.
8779   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
8780     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
8781                                    /*isTarget=*/true);
8782   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
8783     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
8784                                          SDLoc(SymbolicCallee),
8785                                          SymbolicCallee->getValueType(0));
8786 
8787   // Get the real number of arguments participating in the call <numArgs>
8788   SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
8789   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
8790 
8791   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
8792   // Intrinsics include all meta-operands up to but not including CC.
8793   unsigned NumMetaOpers = PatchPointOpers::CCPos;
8794   assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
8795          "Not enough arguments provided to the patchpoint intrinsic");
8796 
8797   // For AnyRegCC the arguments are lowered later on manually.
8798   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
8799   Type *ReturnTy =
8800       IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
8801 
8802   TargetLowering::CallLoweringInfo CLI(DAG);
8803   populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
8804                            ReturnTy, true);
8805   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8806 
8807   SDNode *CallEnd = Result.second.getNode();
8808   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
8809     CallEnd = CallEnd->getOperand(0).getNode();
8810 
8811   /// Get a call instruction from the call sequence chain.
8812   /// Tail calls are not allowed.
8813   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
8814          "Expected a callseq node.");
8815   SDNode *Call = CallEnd->getOperand(0).getNode();
8816   bool HasGlue = Call->getGluedNode();
8817 
8818   // Replace the target specific call node with the patchable intrinsic.
8819   SmallVector<SDValue, 8> Ops;
8820 
8821   // Add the <id> and <numBytes> constants.
8822   SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
8823   Ops.push_back(DAG.getTargetConstant(
8824                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
8825   SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
8826   Ops.push_back(DAG.getTargetConstant(
8827                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
8828                   MVT::i32));
8829 
8830   // Add the callee.
8831   Ops.push_back(Callee);
8832 
8833   // Adjust <numArgs> to account for any arguments that have been passed on the
8834   // stack instead.
8835   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
8836   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
8837   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
8838   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
8839 
8840   // Add the calling convention
8841   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
8842 
8843   // Add the arguments we omitted previously. The register allocator should
8844   // place these in any free register.
8845   if (IsAnyRegCC)
8846     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
8847       Ops.push_back(getValue(CB.getArgOperand(i)));
8848 
8849   // Push the arguments from the call instruction up to the register mask.
8850   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
8851   Ops.append(Call->op_begin() + 2, e);
8852 
8853   // Push live variables for the stack map.
8854   addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
8855 
8856   // Push the register mask info.
8857   if (HasGlue)
8858     Ops.push_back(*(Call->op_end()-2));
8859   else
8860     Ops.push_back(*(Call->op_end()-1));
8861 
8862   // Push the chain (this is originally the first operand of the call, but
8863   // now becomes the last or second-to-last operand).
8864   Ops.push_back(*(Call->op_begin()));
8865 
8866   // Push the glue flag (last operand).
8867   if (HasGlue)
8868     Ops.push_back(*(Call->op_end()-1));
8869 
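  // Illustrative sketch of the operand order assembled above for a non-anyreg
  // call with glue (anyreg adds its omitted arguments right after <cc>):
  //   PATCHPOINT <id>, <numBytes>, <callee>, <numCallRegArgs>, <cc>,
  //              <call register args...>, <live vars...>, <regmask>,
  //              <chain>, <glue>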
8870   SDVTList NodeTys;
8871   if (IsAnyRegCC && HasDef) {
8872     // Create the return types based on the intrinsic definition
8873     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8874     SmallVector<EVT, 3> ValueVTs;
8875     ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
8876     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
8877 
8878     // There are always chain and glue types at the end.
8879     ValueVTs.push_back(MVT::Other);
8880     ValueVTs.push_back(MVT::Glue);
8881     NodeTys = DAG.getVTList(ValueVTs);
8882   } else
8883     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8884 
8885   // Replace the target specific call node with a PATCHPOINT node.
8886   MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
8887                                          dl, NodeTys, Ops);
8888 
8889   // Update the NodeMap.
8890   if (HasDef) {
8891     if (IsAnyRegCC)
8892       setValue(&CB, SDValue(MN, 0));
8893     else
8894       setValue(&CB, Result.first);
8895   }
8896 
8897   // Fix up the consumers of the intrinsic. The chain and glue may be used in
8898   // the call sequence. Furthermore, the location of the chain and glue can
8899   // change when the AnyReg calling convention is used and the intrinsic
8900   // returns a value.
8901   if (IsAnyRegCC && HasDef) {
8902     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
8903     SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
8904     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
8905   } else
8906     DAG.ReplaceAllUsesWith(Call, MN);
8907   DAG.DeleteNode(Call);
8908 
8909   // Inform the Frame Information that we have a patchpoint in this function.
8910   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
8911 }
8912 
8913 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
8914                                             unsigned Intrinsic) {
8915   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8916   SDValue Op1 = getValue(I.getArgOperand(0));
8917   SDValue Op2;
8918   if (I.getNumArgOperands() > 1)
8919     Op2 = getValue(I.getArgOperand(1));
8920   SDLoc dl = getCurSDLoc();
8921   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8922   SDValue Res;
8923   FastMathFlags FMF;
8924   if (isa<FPMathOperator>(I))
8925     FMF = I.getFastMathFlags();
8926 
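  // As an example (a hypothetical IR sketch), a reduction call such as
  //   %r = call reassoc float
  //            @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(
  //                float %acc, <4 x float> %v)
  // is lowered below to FADD(%acc, VECREDUCE_FADD(%v)); without the reassoc
  // flag it becomes a single VECREDUCE_STRICT_FADD node instead.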
8927   switch (Intrinsic) {
8928   case Intrinsic::experimental_vector_reduce_v2_fadd:
8929     if (FMF.allowReassoc())
8930       Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
8931                         DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2));
8932     else
8933       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
8934     break;
8935   case Intrinsic::experimental_vector_reduce_v2_fmul:
8936     if (FMF.allowReassoc())
8937       Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
8938                         DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2));
8939     else
8940       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
8941     break;
8942   case Intrinsic::experimental_vector_reduce_add:
8943     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
8944     break;
8945   case Intrinsic::experimental_vector_reduce_mul:
8946     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
8947     break;
8948   case Intrinsic::experimental_vector_reduce_and:
8949     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
8950     break;
8951   case Intrinsic::experimental_vector_reduce_or:
8952     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
8953     break;
8954   case Intrinsic::experimental_vector_reduce_xor:
8955     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
8956     break;
8957   case Intrinsic::experimental_vector_reduce_smax:
8958     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
8959     break;
8960   case Intrinsic::experimental_vector_reduce_smin:
8961     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
8962     break;
8963   case Intrinsic::experimental_vector_reduce_umax:
8964     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
8965     break;
8966   case Intrinsic::experimental_vector_reduce_umin:
8967     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
8968     break;
8969   case Intrinsic::experimental_vector_reduce_fmax:
8970     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1);
8971     break;
8972   case Intrinsic::experimental_vector_reduce_fmin:
8973     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1);
8974     break;
8975   default:
8976     llvm_unreachable("Unhandled vector reduce intrinsic");
8977   }
8978   setValue(&I, Res);
8979 }
8980 
8981 /// Returns an AttributeList representing the attributes applied to the return
8982 /// value of the given call.
8983 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
8984   SmallVector<Attribute::AttrKind, 2> Attrs;
8985   if (CLI.RetSExt)
8986     Attrs.push_back(Attribute::SExt);
8987   if (CLI.RetZExt)
8988     Attrs.push_back(Attribute::ZExt);
8989   if (CLI.IsInReg)
8990     Attrs.push_back(Attribute::InReg);
8991 
8992   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
8993                             Attrs);
8994 }
8995 
8996 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
8997 /// implementation, which just calls LowerCall.
8998 /// FIXME: When all targets are
8999 /// migrated to using LowerCall, this hook should be integrated into SDISel.
9000 std::pair<SDValue, SDValue>
9001 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
9002   // Handle the incoming return values from the call.
9003   CLI.Ins.clear();
9004   Type *OrigRetTy = CLI.RetTy;
9005   SmallVector<EVT, 4> RetTys;
9006   SmallVector<uint64_t, 4> Offsets;
9007   auto &DL = CLI.DAG.getDataLayout();
9008   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
9009 
9010   if (CLI.IsPostTypeLegalization) {
9011     // If we are lowering a libcall after legalization, split the return type.
9012     SmallVector<EVT, 4> OldRetTys;
9013     SmallVector<uint64_t, 4> OldOffsets;
9014     RetTys.swap(OldRetTys);
9015     Offsets.swap(OldOffsets);
9016 
9017     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
9018       EVT RetVT = OldRetTys[i];
9019       uint64_t Offset = OldOffsets[i];
9020       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
9021       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
9022       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
9023       RetTys.append(NumRegs, RegisterVT);
9024       for (unsigned j = 0; j != NumRegs; ++j)
9025         Offsets.push_back(Offset + j * RegisterVTByteSZ);
9026     }
9027   }
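  // As an illustrative case: an i128 libcall return on a target with 64-bit
  // registers is split above into two i64 pieces at byte offsets 0 and 8.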
9028 
9029   SmallVector<ISD::OutputArg, 4> Outs;
9030   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
9031 
9032   bool CanLowerReturn =
9033       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
9034                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
9035 
9036   SDValue DemoteStackSlot;
9037   int DemoteStackIdx = -100;
9038   if (!CanLowerReturn) {
9039     // FIXME: equivalent assert?
9040     // assert(!CS.hasInAllocaArgument() &&
9041     //        "sret demotion is incompatible with inalloca");
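    // Sret demotion, sketched on a hypothetical example: a call returning
    // { i64, i64, i64, i64 } on a target that cannot return it in registers
    // is rewritten so the caller allocates a stack slot and passes its
    // address as an extra leading 'sret' argument; the pieces of the result
    // are then loaded back from that slot after the call (see the
    // !CanLowerReturn path further below).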
9042     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
9043     Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
9044     MachineFunction &MF = CLI.DAG.getMachineFunction();
9045     DemoteStackIdx =
9046         MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
9047     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
9048                                               DL.getAllocaAddrSpace());
9049 
9050     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
9051     ArgListEntry Entry;
9052     Entry.Node = DemoteStackSlot;
9053     Entry.Ty = StackSlotPtrType;
9054     Entry.IsSExt = false;
9055     Entry.IsZExt = false;
9056     Entry.IsInReg = false;
9057     Entry.IsSRet = true;
9058     Entry.IsNest = false;
9059     Entry.IsByVal = false;
9060     Entry.IsReturned = false;
9061     Entry.IsSwiftSelf = false;
9062     Entry.IsSwiftError = false;
9063     Entry.IsCFGuardTarget = false;
9064     Entry.Alignment = Alignment;
9065     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
9066     CLI.NumFixedArgs += 1;
9067     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
9068 
9069     // sret demotion isn't compatible with tail-calls, since the sret argument
9070     // points into the caller's stack frame.
9071     CLI.IsTailCall = false;
9072   } else {
9073     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
9074         CLI.RetTy, CLI.CallConv, CLI.IsVarArg);
9075     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
9076       ISD::ArgFlagsTy Flags;
9077       if (NeedsRegBlock) {
9078         Flags.setInConsecutiveRegs();
9079         if (I == RetTys.size() - 1)
9080           Flags.setInConsecutiveRegsLast();
9081       }
9082       EVT VT = RetTys[I];
9083       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9084                                                      CLI.CallConv, VT);
9085       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9086                                                        CLI.CallConv, VT);
9087       for (unsigned i = 0; i != NumRegs; ++i) {
9088         ISD::InputArg MyFlags;
9089         MyFlags.Flags = Flags;
9090         MyFlags.VT = RegisterVT;
9091         MyFlags.ArgVT = VT;
9092         MyFlags.Used = CLI.IsReturnValueUsed;
9093         if (CLI.RetTy->isPointerTy()) {
9094           MyFlags.Flags.setPointer();
9095           MyFlags.Flags.setPointerAddrSpace(
9096               cast<PointerType>(CLI.RetTy)->getAddressSpace());
9097         }
9098         if (CLI.RetSExt)
9099           MyFlags.Flags.setSExt();
9100         if (CLI.RetZExt)
9101           MyFlags.Flags.setZExt();
9102         if (CLI.IsInReg)
9103           MyFlags.Flags.setInReg();
9104         CLI.Ins.push_back(MyFlags);
9105       }
9106     }
9107   }
9108 
9109   // We push the swifterror return as the last element of CLI.Ins.
9110   ArgListTy &Args = CLI.getArgs();
9111   if (supportSwiftError()) {
9112     for (unsigned i = 0, e = Args.size(); i != e; ++i) {
9113       if (Args[i].IsSwiftError) {
9114         ISD::InputArg MyFlags;
9115         MyFlags.VT = getPointerTy(DL);
9116         MyFlags.ArgVT = EVT(getPointerTy(DL));
9117         MyFlags.Flags.setSwiftError();
9118         CLI.Ins.push_back(MyFlags);
9119       }
9120     }
9121   }
9122 
9123   // Handle all of the outgoing arguments.
9124   CLI.Outs.clear();
9125   CLI.OutVals.clear();
9126   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
9127     SmallVector<EVT, 4> ValueVTs;
9128     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
9129     // FIXME: Split arguments if CLI.IsPostTypeLegalization
9130     Type *FinalType = Args[i].Ty;
9131     if (Args[i].IsByVal)
9132       FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
9133     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
9134         FinalType, CLI.CallConv, CLI.IsVarArg);
9135     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
9136          ++Value) {
9137       EVT VT = ValueVTs[Value];
9138       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
9139       SDValue Op = SDValue(Args[i].Node.getNode(),
9140                            Args[i].Node.getResNo() + Value);
9141       ISD::ArgFlagsTy Flags;
9142 
9143       // Certain targets (such as MIPS) may have a different ABI alignment
9144       // for a type depending on the context. Give the target a chance to
9145       // specify the alignment it wants.
9146       const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
9147 
9148       if (Args[i].Ty->isPointerTy()) {
9149         Flags.setPointer();
9150         Flags.setPointerAddrSpace(
9151             cast<PointerType>(Args[i].Ty)->getAddressSpace());
9152       }
9153       if (Args[i].IsZExt)
9154         Flags.setZExt();
9155       if (Args[i].IsSExt)
9156         Flags.setSExt();
9157       if (Args[i].IsInReg) {
9158         // If we are using the vectorcall calling convention, a structure that is
9159         // passed InReg is surely an HVA (Homogeneous Vector Aggregate).
9160         if (CLI.CallConv == CallingConv::X86_VectorCall &&
9161             isa<StructType>(FinalType)) {
9162           // The first value of a structure is marked as HvaStart.
9163           if (0 == Value)
9164             Flags.setHvaStart();
9165           Flags.setHva();
9166         }
9167         // Set InReg Flag
9168         Flags.setInReg();
9169       }
9170       if (Args[i].IsSRet)
9171         Flags.setSRet();
9172       if (Args[i].IsSwiftSelf)
9173         Flags.setSwiftSelf();
9174       if (Args[i].IsSwiftError)
9175         Flags.setSwiftError();
9176       if (Args[i].IsCFGuardTarget)
9177         Flags.setCFGuardTarget();
9178       if (Args[i].IsByVal)
9179         Flags.setByVal();
9180       if (Args[i].IsPreallocated) {
9181         Flags.setPreallocated();
9182         // Set the byval flag for CCAssignFn callbacks that don't know about
9183         // preallocated.  This way we can know how many bytes we should've
9184         // allocated and how many bytes a callee cleanup function will pop.  If
9185         // we port preallocated to more targets, we'll have to add custom
9186         // preallocated handling in the various CC lowering callbacks.
9187         Flags.setByVal();
9188       }
9189       if (Args[i].IsInAlloca) {
9190         Flags.setInAlloca();
9191         // Set the byval flag for CCAssignFn callbacks that don't know about
9192         // inalloca.  This way we can know how many bytes we should've allocated
9193         // and how many bytes a callee cleanup function will pop.  If we port
9194         // inalloca to more targets, we'll have to add custom inalloca handling
9195         // in the various CC lowering callbacks.
9196         Flags.setByVal();
9197       }
9198       if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
9199         PointerType *Ty = cast<PointerType>(Args[i].Ty);
9200         Type *ElementTy = Ty->getElementType();
9201 
9202         unsigned FrameSize = DL.getTypeAllocSize(
9203             Args[i].ByValType ? Args[i].ByValType : ElementTy);
9204         Flags.setByValSize(FrameSize);
9205 
9206         // BE will guess the alignment if FE did not pass it, but there are cases it cannot get right.
9207         Align FrameAlign;
9208         if (auto MA = Args[i].Alignment)
9209           FrameAlign = *MA;
9210         else
9211           FrameAlign = Align(getByValTypeAlignment(ElementTy, DL));
9212         Flags.setByValAlign(FrameAlign);
9213       }
9214       if (Args[i].IsNest)
9215         Flags.setNest();
9216       if (NeedsRegBlock)
9217         Flags.setInConsecutiveRegs();
9218       Flags.setOrigAlign(OriginalAlignment);
9219 
9220       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9221                                                  CLI.CallConv, VT);
9222       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9223                                                         CLI.CallConv, VT);
9224       SmallVector<SDValue, 4> Parts(NumParts);
9225       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
9226 
9227       if (Args[i].IsSExt)
9228         ExtendKind = ISD::SIGN_EXTEND;
9229       else if (Args[i].IsZExt)
9230         ExtendKind = ISD::ZERO_EXTEND;
9231 
9232       // Conservatively only handle 'returned' on non-vectors that can be
9233       // lowered, for now.
9234       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
9235           CanLowerReturn) {
9236         assert((CLI.RetTy == Args[i].Ty ||
9237                 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
9238                  CLI.RetTy->getPointerAddressSpace() ==
9239                      Args[i].Ty->getPointerAddressSpace())) &&
9240                RetTys.size() == NumValues && "unexpected use of 'returned'");
9241         // Before passing 'returned' to the target lowering code, ensure that
9242         // either the register MVT and the actual EVT are the same size or that
9243         // the return value and argument are extended in the same way; in these
9244         // cases it's safe to pass the argument register value unchanged as the
9245         // return register value (although it's at the target's option whether
9246         // to do so)
9247         // TODO: allow code generation to take advantage of partially preserved
9248         // registers rather than clobbering the entire register when the
9249         // parameter extension method is not compatible with the return
9250         // extension method
9251         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
9252             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
9253              CLI.RetZExt == Args[i].IsZExt))
9254           Flags.setReturned();
9255       }
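      // As an illustrative case, a declaration such as
      //   i8* @memcpy(i8* returned %dst, i8* %src, i64 %n)
      // lets the caller reuse the register holding %dst as the return value,
      // subject to the size/extension checks above.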
9256 
9257       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
9258                      CLI.CallConv, ExtendKind);
9259 
9260       for (unsigned j = 0; j != NumParts; ++j) {
9261         // If it isn't the first piece, the alignment must be 1.
9262         // For scalable vectors the scalable part is currently handled
9263         // by individual targets, so we just use the known minimum size here.
9264         ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
9265                     i < CLI.NumFixedArgs, i,
9266                     j*Parts[j].getValueType().getStoreSize().getKnownMinSize());
9267         if (NumParts > 1 && j == 0)
9268           MyFlags.Flags.setSplit();
9269         else if (j != 0) {
9270           MyFlags.Flags.setOrigAlign(Align(1));
9271           if (j == NumParts - 1)
9272             MyFlags.Flags.setSplitEnd();
9273         }
9274 
9275         CLI.Outs.push_back(MyFlags);
9276         CLI.OutVals.push_back(Parts[j]);
9277       }
9278 
9279       if (NeedsRegBlock && Value == NumValues - 1)
9280         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
9281     }
9282   }
9283 
9284   SmallVector<SDValue, 4> InVals;
9285   CLI.Chain = LowerCall(CLI, InVals);
9286 
9287   // Update CLI.InVals to use outside of this function.
9288   CLI.InVals = InVals;
9289 
9290   // Verify that the target's LowerCall behaved as expected.
9291   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
9292          "LowerCall didn't return a valid chain!");
9293   assert((!CLI.IsTailCall || InVals.empty()) &&
9294          "LowerCall emitted a return value for a tail call!");
9295   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
9296          "LowerCall didn't emit the correct number of values!");
9297 
9298   // For a tail call, the return value is merely live-out and there aren't
9299   // any nodes in the DAG representing it. Return a special value to
9300   // indicate that a tail call has been emitted and no more Instructions
9301   // should be processed in the current block.
9302   if (CLI.IsTailCall) {
9303     CLI.DAG.setRoot(CLI.Chain);
9304     return std::make_pair(SDValue(), SDValue());
9305   }
9306 
9307 #ifndef NDEBUG
9308   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
9309     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
9310     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
9311            "LowerCall emitted a value with the wrong type!");
9312   }
9313 #endif
9314 
9315   SmallVector<SDValue, 4> ReturnValues;
9316   if (!CanLowerReturn) {
9317     // The instruction result is the result of loading from the
9318     // hidden sret parameter.
9319     SmallVector<EVT, 1> PVTs;
9320     Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());
9321 
9322     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
9323     assert(PVTs.size() == 1 && "Pointers should fit in one register");
9324     EVT PtrVT = PVTs[0];
9325 
9326     unsigned NumValues = RetTys.size();
9327     ReturnValues.resize(NumValues);
9328     SmallVector<SDValue, 4> Chains(NumValues);
9329 
9330     // An aggregate return value cannot wrap around the address space, so
9331     // offsets to its parts don't wrap either.
9332     SDNodeFlags Flags;
9333     Flags.setNoUnsignedWrap(true);
9334 
9335     MachineFunction &MF = CLI.DAG.getMachineFunction();
9336     Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
9337     for (unsigned i = 0; i < NumValues; ++i) {
9338       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
9339                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
9340                                                         PtrVT), Flags);
9341       SDValue L = CLI.DAG.getLoad(
9342           RetTys[i], CLI.DL, CLI.Chain, Add,
9343           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
9344                                             DemoteStackIdx, Offsets[i]),
9345           HiddenSRetAlign);
9346       ReturnValues[i] = L;
9347       Chains[i] = L.getValue(1);
9348     }
9349 
9350     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
9351   } else {
9352     // Collect the legal value parts into potentially illegal values
9353     // that correspond to the original function's return values.
9354     Optional<ISD::NodeType> AssertOp;
9355     if (CLI.RetSExt)
9356       AssertOp = ISD::AssertSext;
9357     else if (CLI.RetZExt)
9358       AssertOp = ISD::AssertZext;
9359     unsigned CurReg = 0;
9360     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
9361       EVT VT = RetTys[I];
9362       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9363                                                      CLI.CallConv, VT);
9364       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9365                                                        CLI.CallConv, VT);
9366 
9367       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
9368                                               NumRegs, RegisterVT, VT, nullptr,
9369                                               CLI.CallConv, AssertOp));
9370       CurReg += NumRegs;
9371     }
9372 
9373     // For a function returning void, there is no return value. We can't create
9374     // such a node, so we just return a null return value in that case;
9375     // nothing will actually look at the value.
9376     if (ReturnValues.empty())
9377       return std::make_pair(SDValue(), CLI.Chain);
9378   }
9379 
9380   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
9381                                 CLI.DAG.getVTList(RetTys), ReturnValues);
9382   return std::make_pair(Res, CLI.Chain);
9383 }
9384 
9385 void TargetLowering::LowerOperationWrapper(SDNode *N,
9386                                            SmallVectorImpl<SDValue> &Results,
9387                                            SelectionDAG &DAG) const {
9388   if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
9389     Results.push_back(Res);
9390 }
9391 
9392 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9393   llvm_unreachable("LowerOperation not implemented for this target!");
9394 }
9395 
9396 void
9397 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
9398   SDValue Op = getNonRegisterValue(V);
9399   assert((Op.getOpcode() != ISD::CopyFromReg ||
9400           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
9401          "Copy from a reg to the same reg!");
9402   assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
9403 
9404   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9405   // If this is an InlineAsm we have to match the registers required, not the
9406   // notional registers required by the type.
9407 
9408   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
9409                    None); // This is not an ABI copy.
9410   SDValue Chain = DAG.getEntryNode();
9411 
9412   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
9413                               FuncInfo.PreferredExtendType.end())
9414                                  ? ISD::ANY_EXTEND
9415                                  : FuncInfo.PreferredExtendType[V];
9416   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
9417   PendingExports.push_back(Chain);
9418 }
9419 
9420 #include "llvm/CodeGen/SelectionDAGISel.h"
9421 
9422 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
9423 /// entry block, return true.  This includes arguments used by switches, since
9424 /// the switch may expand into multiple basic blocks.
9425 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
9426   // With FastISel active, we may be splitting blocks, so force creation
9427   // of virtual registers for all non-dead arguments.
9428   if (FastISel)
9429     return A->use_empty();
9430 
9431   const BasicBlock &Entry = A->getParent()->front();
9432   for (const User *U : A->users())
9433     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
9434       return false;  // Use not in entry block.
9435 
9436   return true;
9437 }
9438 
9439 using ArgCopyElisionMapTy =
9440     DenseMap<const Argument *,
9441              std::pair<const AllocaInst *, const StoreInst *>>;
9442 
9443 /// Scan the entry block of the function in FuncInfo for arguments that look
9444 /// like copies into a local alloca. Record any copied arguments in
9445 /// ArgCopyElisionCandidates.
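/// A typical candidate is the entry-block pattern below (a hypothetical
/// sketch; the names are illustrative):
///   %x.addr = alloca i32
///   store i32 %x, i32* %x.addr
/// where %x is an argument, the store fully initializes the alloca, and the
/// alloca is not otherwise used or clobbered before the store.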
9446 static void
9447 findArgumentCopyElisionCandidates(const DataLayout &DL,
9448                                   FunctionLoweringInfo *FuncInfo,
9449                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
9450   // Record the state of every static alloca used in the entry block. Argument
9451   // allocas are all used in the entry block, so we need approximately as many
9452   // entries as we have arguments.
9453   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
9454   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
9455   unsigned NumArgs = FuncInfo->Fn->arg_size();
9456   StaticAllocas.reserve(NumArgs * 2);
9457 
9458   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
9459     if (!V)
9460       return nullptr;
9461     V = V->stripPointerCasts();
9462     const auto *AI = dyn_cast<AllocaInst>(V);
9463     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
9464       return nullptr;
9465     auto Iter = StaticAllocas.insert({AI, Unknown});
9466     return &Iter.first->second;
9467   };
9468 
9469   // Look for stores of arguments to static allocas. Look through bitcasts and
9470   // GEPs to handle type coercions, as long as the alloca is fully initialized
9471   // by the store. Any non-store use of an alloca escapes it and any subsequent
9472   // unanalyzed store might write it.
9473   // FIXME: Handle structs initialized with multiple stores.
9474   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
9475     // Look for stores, and handle non-store uses conservatively.
9476     const auto *SI = dyn_cast<StoreInst>(&I);
9477     if (!SI) {
9478       // We will look through cast uses, so ignore them completely.
9479       if (I.isCast())
9480         continue;
9481       // Ignore debug info intrinsics, they don't escape or store to allocas.
9482       if (isa<DbgInfoIntrinsic>(I))
9483         continue;
9484       // This is an unknown instruction. Assume it escapes or writes to all
9485       // static alloca operands.
9486       for (const Use &U : I.operands()) {
9487         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
9488           *Info = StaticAllocaInfo::Clobbered;
9489       }
9490       continue;
9491     }
9492 
9493     // If the stored value is a static alloca, mark it as escaped.
9494     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
9495       *Info = StaticAllocaInfo::Clobbered;
9496 
9497     // Check if the destination is a static alloca.
9498     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
9499     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
9500     if (!Info)
9501       continue;
9502     const AllocaInst *AI = cast<AllocaInst>(Dst);
9503 
9504     // Skip allocas that have been initialized or clobbered.
9505     if (*Info != StaticAllocaInfo::Unknown)
9506       continue;
9507 
9508     // Check if the stored value is an argument, and that this store fully
9509     // initializes the alloca. Don't elide copies from the same argument twice.
9510     const Value *Val = SI->getValueOperand()->stripPointerCasts();
9511     const auto *Arg = dyn_cast<Argument>(Val);
9512     if (!Arg || Arg->hasPassPointeeByValueAttr() ||
9513         Arg->getType()->isEmptyTy() ||
9514         DL.getTypeStoreSize(Arg->getType()) !=
9515             DL.getTypeAllocSize(AI->getAllocatedType()) ||
9516         ArgCopyElisionCandidates.count(Arg)) {
9517       *Info = StaticAllocaInfo::Clobbered;
9518       continue;
9519     }
9520 
9521     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
9522                       << '\n');
9523 
9524     // Mark this alloca and store for argument copy elision.
9525     *Info = StaticAllocaInfo::Elidable;
9526     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
9527 
9528     // Stop scanning if we've seen all arguments. This will happen early in -O0
9529     // builds, which is useful, because -O0 builds have large entry blocks and
9530     // many allocas.
9531     if (ArgCopyElisionCandidates.size() == NumArgs)
9532       break;
9533   }
9534 }
9535 
9536 /// Try to elide argument copies from memory into a local alloca. Succeeds if
9537 /// ArgVal is a load from a suitable fixed stack object.
9538 static void tryToElideArgumentCopy(
9539     FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
9540     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
9541     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
9542     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
9543     SDValue ArgVal, bool &ArgHasUses) {
9544   // Check if this is a load from a fixed stack object.
9545   auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
9546   if (!LNode)
9547     return;
9548   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
9549   if (!FINode)
9550     return;
9551 
9552   // Check that the fixed stack object is the right size and alignment.
9553   // Look at the alignment that the user wrote on the alloca instead of looking
9554   // at the stack object.
9555   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
9556   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
9557   const AllocaInst *AI = ArgCopyIter->second.first;
9558   int FixedIndex = FINode->getIndex();
9559   int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
9560   int OldIndex = AllocaIndex;
9561   MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
9562   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
9563     LLVM_DEBUG(
9564         dbgs() << "  argument copy elision failed due to bad fixed stack "
9565                   "object size\n");
9566     return;
9567   }
9568   Align RequiredAlignment = AI->getAlign();
9569   if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
9570     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
9571                          "greater than stack argument alignment ("
9572                       << DebugStr(RequiredAlignment) << " vs "
9573                       << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
9574     return;
9575   }
9576 
9577   // Perform the elision. Delete the old stack object and replace its only use
9578   // in the variable info map. Mark the stack object as mutable.
9579   LLVM_DEBUG({
9580     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
9581            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
9582            << '\n';
9583   });
9584   MFI.RemoveStackObject(OldIndex);
9585   MFI.setIsImmutableObjectIndex(FixedIndex, false);
9586   AllocaIndex = FixedIndex;
9587   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
9588   Chains.push_back(ArgVal.getValue(1));
9589 
9590   // Avoid emitting code for the store implementing the copy.
9591   const StoreInst *SI = ArgCopyIter->second.second;
9592   ElidedArgCopyInstrs.insert(SI);
9593 
9594   // Check for uses of the argument again so that we can avoid exporting ArgVal
9595   // if it isn't used by anything other than the store.
9596   for (const Value *U : Arg.users()) {
9597     if (U != SI) {
9598       ArgHasUses = true;
9599       break;
9600     }
9601   }
9602 }
9603 
9604 void SelectionDAGISel::LowerArguments(const Function &F) {
9605   SelectionDAG &DAG = SDB->DAG;
9606   SDLoc dl = SDB->getCurSDLoc();
9607   const DataLayout &DL = DAG.getDataLayout();
9608   SmallVector<ISD::InputArg, 16> Ins;
9609 
9610   // In Naked functions we aren't going to save any registers.
9611   if (F.hasFnAttribute(Attribute::Naked))
9612     return;
9613 
9614   if (!FuncInfo->CanLowerReturn) {
9615     // Put in an sret pointer parameter before all the other parameters.
9616     SmallVector<EVT, 1> ValueVTs;
9617     ComputeValueVTs(*TLI, DAG.getDataLayout(),
9618                     F.getReturnType()->getPointerTo(
9619                         DAG.getDataLayout().getAllocaAddrSpace()),
9620                     ValueVTs);
9621 
9622     // NOTE: Assuming that a pointer will never break down into more than one
9623     // VT or more than one register.
9624     ISD::ArgFlagsTy Flags;
9625     Flags.setSRet();
9626     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
9627     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
9628                          ISD::InputArg::NoArgIndex, 0);
9629     Ins.push_back(RetArg);
9630   }
9631 
9632   // Look for stores of arguments to static allocas. Mark such arguments with a
9633   // flag to ask the target to give us the memory location of that argument if
9634   // available.
9635   ArgCopyElisionMapTy ArgCopyElisionCandidates;
9636   findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
9637                                     ArgCopyElisionCandidates);
9638 
9639   // Set up the incoming argument description vector.
9640   for (const Argument &Arg : F.args()) {
9641     unsigned ArgNo = Arg.getArgNo();
9642     SmallVector<EVT, 4> ValueVTs;
9643     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9644     bool isArgValueUsed = !Arg.use_empty();
9645     unsigned PartBase = 0;
9646     Type *FinalType = Arg.getType();
9647     if (Arg.hasAttribute(Attribute::ByVal))
9648       FinalType = Arg.getParamByValType();
9649     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
9650         FinalType, F.getCallingConv(), F.isVarArg());
9651     for (unsigned Value = 0, NumValues = ValueVTs.size();
9652          Value != NumValues; ++Value) {
9653       EVT VT = ValueVTs[Value];
9654       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
9655       ISD::ArgFlagsTy Flags;
9656 
9657       // Certain targets (such as MIPS) may have a different ABI alignment
9658       // for a type depending on the context. Give the target a chance to
9659       // specify the alignment it wants.
9660       const Align OriginalAlignment(
9661           TLI->getABIAlignmentForCallingConv(ArgTy, DL));
9662 
9663       if (Arg.getType()->isPointerTy()) {
9664         Flags.setPointer();
9665         Flags.setPointerAddrSpace(
9666             cast<PointerType>(Arg.getType())->getAddressSpace());
9667       }
9668       if (Arg.hasAttribute(Attribute::ZExt))
9669         Flags.setZExt();
9670       if (Arg.hasAttribute(Attribute::SExt))
9671         Flags.setSExt();
9672       if (Arg.hasAttribute(Attribute::InReg)) {
9673         // If we are using the vectorcall calling convention, a structure that is
9674         // passed InReg is surely an HVA (Homogeneous Vector Aggregate).
9675         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
9676             isa<StructType>(Arg.getType())) {
9677           // The first value of a structure is marked as HvaStart.
9678           if (0 == Value)
9679             Flags.setHvaStart();
9680           Flags.setHva();
9681         }
9682         // Set InReg Flag
9683         Flags.setInReg();
9684       }
9685       if (Arg.hasAttribute(Attribute::StructRet))
9686         Flags.setSRet();
9687       if (Arg.hasAttribute(Attribute::SwiftSelf))
9688         Flags.setSwiftSelf();
9689       if (Arg.hasAttribute(Attribute::SwiftError))
9690         Flags.setSwiftError();
9691       if (Arg.hasAttribute(Attribute::ByVal))
9692         Flags.setByVal();
9693       if (Arg.hasAttribute(Attribute::InAlloca)) {
9694         Flags.setInAlloca();
9695         // Set the byval flag for CCAssignFn callbacks that don't know about
9696         // inalloca.  This way we can know how many bytes we should've allocated
9697         // and how many bytes a callee cleanup function will pop.  If we port
9698         // inalloca to more targets, we'll have to add custom inalloca handling
9699         // in the various CC lowering callbacks.
9700         Flags.setByVal();
9701       }
9702       if (Arg.hasAttribute(Attribute::Preallocated)) {
9703         Flags.setPreallocated();
9704         // Set the byval flag for CCAssignFn callbacks that don't know about
9705         // preallocated.  This way we can know how many bytes we should've
9706         // allocated and how many bytes a callee cleanup function will pop.  If
9707         // we port preallocated to more targets, we'll have to add custom
9708         // preallocated handling in the various CC lowering callbacks.
9709         Flags.setByVal();
9710       }
9711       if (F.getCallingConv() == CallingConv::X86_INTR) {
9712         // The x86 interrupt CC passes the frame (1st parameter) by value on the stack.
9713         if (ArgNo == 0)
9714           Flags.setByVal();
9715       }
9716       if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
9717         Type *ElementTy = Arg.getParamByValType();
9718 
9719         // For ByVal, size and alignment should be passed from FE.  BE will
9720         // guess if this info is not there but there are cases it cannot get
9721         // right.
9722         unsigned FrameSize = DL.getTypeAllocSize(Arg.getParamByValType());
9723         Flags.setByValSize(FrameSize);
9724 
9725         unsigned FrameAlign;
9726         if (Arg.getParamAlignment())
9727           FrameAlign = Arg.getParamAlignment();
9728         else
9729           FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
9730         Flags.setByValAlign(Align(FrameAlign));
9731       }
9732       if (Arg.hasAttribute(Attribute::Nest))
9733         Flags.setNest();
9734       if (NeedsRegBlock)
9735         Flags.setInConsecutiveRegs();
9736       Flags.setOrigAlign(OriginalAlignment);
9737       if (ArgCopyElisionCandidates.count(&Arg))
9738         Flags.setCopyElisionCandidate();
9739       if (Arg.hasAttribute(Attribute::Returned))
9740         Flags.setReturned();
9741 
9742       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
9743           *CurDAG->getContext(), F.getCallingConv(), VT);
9744       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
9745           *CurDAG->getContext(), F.getCallingConv(), VT);
9746       for (unsigned i = 0; i != NumRegs; ++i) {
9747         // For scalable vectors, use the minimum size; individual targets
9748         // are responsible for handling scalable vector arguments and
9749         // return values.
9750         ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
9751                  ArgNo, PartBase+i*RegisterVT.getStoreSize().getKnownMinSize());
9752         if (NumRegs > 1 && i == 0)
9753           MyFlags.Flags.setSplit();
9754         // If it isn't the first piece, the alignment must be 1.
9755         else if (i > 0) {
9756           MyFlags.Flags.setOrigAlign(Align(1));
9757           if (i == NumRegs - 1)
9758             MyFlags.Flags.setSplitEnd();
9759         }
9760         Ins.push_back(MyFlags);
9761       }
9762       if (NeedsRegBlock && Value == NumValues - 1)
9763         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
9764       PartBase += VT.getStoreSize().getKnownMinSize();
9765     }
9766   }
9767 
9768   // Call the target to set up the argument values.
9769   SmallVector<SDValue, 8> InVals;
9770   SDValue NewRoot = TLI->LowerFormalArguments(
9771       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
9772 
9773   // Verify that the target's LowerFormalArguments behaved as expected.
9774   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
9775          "LowerFormalArguments didn't return a valid chain!");
9776   assert(InVals.size() == Ins.size() &&
9777          "LowerFormalArguments didn't emit the correct number of values!");
9778   LLVM_DEBUG({
9779     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
9780       assert(InVals[i].getNode() &&
9781              "LowerFormalArguments emitted a null value!");
9782       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
9783              "LowerFormalArguments emitted a value with the wrong type!");
9784     }
9785   });
9786 
9787   // Update the DAG with the new chain value resulting from argument lowering.
9788   DAG.setRoot(NewRoot);
9789 
9790   // Set up the argument values.
9791   unsigned i = 0;
9792   if (!FuncInfo->CanLowerReturn) {
9793     // Create a virtual register for the sret pointer, and put in a copy
9794     // from the sret argument into it.
9795     SmallVector<EVT, 1> ValueVTs;
9796     ComputeValueVTs(*TLI, DAG.getDataLayout(),
9797                     F.getReturnType()->getPointerTo(
9798                         DAG.getDataLayout().getAllocaAddrSpace()),
9799                     ValueVTs);
9800     MVT VT = ValueVTs[0].getSimpleVT();
9801     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
9802     Optional<ISD::NodeType> AssertOp = None;
9803     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
9804                                         nullptr, F.getCallingConv(), AssertOp);
9805 
9806     MachineFunction& MF = SDB->DAG.getMachineFunction();
9807     MachineRegisterInfo& RegInfo = MF.getRegInfo();
9808     Register SRetReg =
9809         RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
9810     FuncInfo->DemoteRegister = SRetReg;
9811     NewRoot =
9812         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
9813     DAG.setRoot(NewRoot);
9814 
9815     // i indexes lowered arguments.  Bump it past the hidden sret argument.
9816     ++i;
9817   }
9818 
9819   SmallVector<SDValue, 4> Chains;
9820   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
9821   for (const Argument &Arg : F.args()) {
9822     SmallVector<SDValue, 4> ArgValues;
9823     SmallVector<EVT, 4> ValueVTs;
9824     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9825     unsigned NumValues = ValueVTs.size();
9826     if (NumValues == 0)
9827       continue;
9828 
9829     bool ArgHasUses = !Arg.use_empty();
9830 
9831     // Elide the copying store if the target loaded this argument from a
9832     // suitable fixed stack object.
9833     if (Ins[i].Flags.isCopyElisionCandidate()) {
9834       tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
9835                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
9836                              InVals[i], ArgHasUses);
9837     }
9838 
9839     // If this argument is unused then remember its value. It is used to
9840     // generate debugging information.
9841     bool isSwiftErrorArg =
9842         TLI->supportSwiftError() &&
9843         Arg.hasAttribute(Attribute::SwiftError);
9844     if (!ArgHasUses && !isSwiftErrorArg) {
9845       SDB->setUnusedArgValue(&Arg, InVals[i]);
9846 
9847       // Also remember any frame index for use in FastISel.
9848       if (FrameIndexSDNode *FI =
9849           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
9850         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9851     }
9852 
9853     for (unsigned Val = 0; Val != NumValues; ++Val) {
9854       EVT VT = ValueVTs[Val];
9855       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
9856                                                       F.getCallingConv(), VT);
9857       unsigned NumParts = TLI->getNumRegistersForCallingConv(
9858           *CurDAG->getContext(), F.getCallingConv(), VT);
9859 
9860       // Even an apparent 'unused' swifterror argument needs to be returned. So
9861       // we do generate a copy for it that can be used on return from the
9862       // function.
9863       if (ArgHasUses || isSwiftErrorArg) {
9864         Optional<ISD::NodeType> AssertOp;
9865         if (Arg.hasAttribute(Attribute::SExt))
9866           AssertOp = ISD::AssertSext;
9867         else if (Arg.hasAttribute(Attribute::ZExt))
9868           AssertOp = ISD::AssertZext;
9869 
9870         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
9871                                              PartVT, VT, nullptr,
9872                                              F.getCallingConv(), AssertOp));
9873       }
9874 
9875       i += NumParts;
9876     }
9877 
9878     // We don't need to do anything else for unused arguments.
9879     if (ArgValues.empty())
9880       continue;
9881 
9882     // Note down frame index.
9883     if (FrameIndexSDNode *FI =
9884         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
9885       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9886 
9887     SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
9888                                      SDB->getCurSDLoc());
9889 
9890     SDB->setValue(&Arg, Res);
9891     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
9892       // We want to associate the argument with the frame index, among
9893       // involved operands, that correspond to the lowest address. The
9894       // getCopyFromParts function, called earlier, is swapping the order of
9895       // the operands to BUILD_PAIR depending on endianness. The result of
9896       // that swapping is that the least significant bits of the argument will
9897       // be in the first operand of the BUILD_PAIR node, and the most
9898       // significant bits will be in the second operand.
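      // For example (illustrative): an i64 argument split into two i32 loads
      // on a big-endian target keeps its low-address half (the most
      // significant bits) in operand 1 of the BUILD_PAIR, hence LowAddressOp
      // below is 1 when isBigEndian().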
9899       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
9900       if (LoadSDNode *LNode =
9901           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
9902         if (FrameIndexSDNode *FI =
9903             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
9904           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9905     }
9906 
9907     // Analyses past this point are naive and don't expect an assertion.
9908     if (Res.getOpcode() == ISD::AssertZext)
9909       Res = Res.getOperand(0);
9910 
9911     // Update the SwiftErrorVRegDefMap.
9912     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
9913       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9914       if (Register::isVirtualRegister(Reg))
9915         SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
9916                                    Reg);
9917     }
9918 
9919     // If this argument is live outside of the entry block, insert a copy from
9920     // wherever we got it to the vreg that other BB's will reference it as.
9921     if (Res.getOpcode() == ISD::CopyFromReg) {
9922       // If we can, though, try to skip creating an unnecessary vreg.
9923       // FIXME: This isn't very clean... it would be nice to make this more
9924       // general.
9925       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9926       if (Register::isVirtualRegister(Reg)) {
9927         FuncInfo->ValueMap[&Arg] = Reg;
9928         continue;
9929       }
9930     }
9931     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
9932       FuncInfo->InitializeRegForValue(&Arg);
9933       SDB->CopyToExportRegsIfNeeded(&Arg);
9934     }
9935   }
9936 
9937   if (!Chains.empty()) {
9938     Chains.push_back(NewRoot);
9939     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
9940   }
9941 
9942   DAG.setRoot(NewRoot);
9943 
9944   assert(i == InVals.size() && "Argument register count mismatch!");
9945 
9946   // If any argument copy elisions occurred and we have debug info, update the
9947   // stale frame indices used in the dbg.declare variable info table.
9948   MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
9949   if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
9950     for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
9951       auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
9952       if (I != ArgCopyElisionFrameIndexMap.end())
9953         VI.Slot = I->second;
9954     }
9955   }
9956 
9957   // Finally, if the target has anything special to do, allow it to do so.
9958   emitFunctionEntryCode();
9959 }
9960 
9961 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
9962 /// ensure constants are generated when needed.  Remember the virtual registers
9963 /// that need to be added to the Machine PHI nodes as input.  We cannot just
9964 /// directly add them, because expansion might result in multiple MBB's for one
9965 /// BB.  As such, the start of the BB might correspond to a different MBB than
9966 /// the end.
9967 void
9968 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
9969   const Instruction *TI = LLVMBB->getTerminator();
9970 
9971   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
9972 
9973   // Check PHI nodes in successors that expect a value to be available from this
9974   // block.
9975   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
9976     const BasicBlock *SuccBB = TI->getSuccessor(succ);
9977     if (!isa<PHINode>(SuccBB->begin())) continue;
9978     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
9979 
9980     // If this terminator has multiple identical successors (common for
9981     // switches), only handle each succ once.
9982     if (!SuccsHandled.insert(SuccMBB).second)
9983       continue;
9984 
9985     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
9986 
9987     // At this point we know that there is a 1-1 correspondence between LLVM PHI
9988     // nodes and Machine PHI nodes, but the incoming operands have not been
9989     // emitted yet.
9990     for (const PHINode &PN : SuccBB->phis()) {
9991       // Ignore dead phi's.
9992       if (PN.use_empty())
9993         continue;
9994 
9995       // Skip empty types
9996       if (PN.getType()->isEmptyTy())
9997         continue;
9998 
9999       unsigned Reg;
10000       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
10001 
10002       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
10003         unsigned &RegOut = ConstantsOut[C];
10004         if (RegOut == 0) {
10005           RegOut = FuncInfo.CreateRegs(C);
10006           CopyValueToVirtualRegister(C, RegOut);
10007         }
10008         Reg = RegOut;
10009       } else {
10010         DenseMap<const Value *, Register>::iterator I =
10011           FuncInfo.ValueMap.find(PHIOp);
10012         if (I != FuncInfo.ValueMap.end())
10013           Reg = I->second;
10014         else {
10015           assert(isa<AllocaInst>(PHIOp) &&
10016                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
10017                  "Didn't codegen value into a register!??");
10018           Reg = FuncInfo.CreateRegs(PHIOp);
10019           CopyValueToVirtualRegister(PHIOp, Reg);
10020         }
10021       }
10022 
10023       // Remember that this register needs to be added to the machine PHI node
10024       // as the input for this MBB.
10025       SmallVector<EVT, 4> ValueVTs;
10026       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10027       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
10028       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
10029         EVT VT = ValueVTs[vti];
10030         unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
10031         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
10032           FuncInfo.PHINodesToUpdate.push_back(
10033               std::make_pair(&*MBBI++, Reg + i));
10034         Reg += NumRegisters;
10035       }
10036     }
10037   }
10038 
10039   ConstantsOut.clear();
10040 }
10041 
10042 /// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
10043 /// if SuccMBB is null.
10044 MachineBasicBlock *
10045 SelectionDAGBuilder::StackProtectorDescriptor::
10046 AddSuccessorMBB(const BasicBlock *BB,
10047                 MachineBasicBlock *ParentMBB,
10048                 bool IsLikely,
10049                 MachineBasicBlock *SuccMBB) {
10050   // If SuccBB has not been created yet, create it.
10051   if (!SuccMBB) {
10052     MachineFunction *MF = ParentMBB->getParent();
10053     MachineFunction::iterator BBI(ParentMBB);
10054     SuccMBB = MF->CreateMachineBasicBlock(BB);
10055     MF->insert(++BBI, SuccMBB);
10056   }
10057   // Add it as a successor of ParentMBB.
10058   ParentMBB->addSuccessor(
10059       SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
10060   return SuccMBB;
10061 }
10062 
10063 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
10064   MachineFunction::iterator I(MBB);
10065   if (++I == FuncInfo.MF->end())
10066     return nullptr;
10067   return &*I;
10068 }
10069 
10070 /// During lowering new call nodes can be created (such as memset, etc.).
10071 /// Those will become new roots of the current DAG, but complications arise
10072 /// when they are tail calls. In such cases, the call lowering will update
10073 /// the root, but the builder still needs to know that a tail call has been
10074 /// lowered in order to avoid generating an additional return.
10075 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
10076   // If the node is null, we do have a tail call.
10077   if (MaybeTC.getNode() != nullptr)
10078     DAG.setRoot(MaybeTC);
10079   else
10080     HasTailCall = true;
10081 }
10082 
10083 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
10084                                         MachineBasicBlock *SwitchMBB,
10085                                         MachineBasicBlock *DefaultMBB) {
10086   MachineFunction *CurMF = FuncInfo.MF;
10087   MachineBasicBlock *NextMBB = nullptr;
10088   MachineFunction::iterator BBI(W.MBB);
10089   if (++BBI != FuncInfo.MF->end())
10090     NextMBB = &*BBI;
10091 
10092   unsigned Size = W.LastCluster - W.FirstCluster + 1;
10093 
10094   BranchProbabilityInfo *BPI = FuncInfo.BPI;
10095 
10096   if (Size == 2 && W.MBB == SwitchMBB) {
10097     // If any two of the cases have the same destination, and if one value
10098     // is the same as the other, but has one bit unset that the other has set,
10099     // use bit manipulation to do two compares at once.  For example:
10100     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
10101     // TODO: This could be extended to merge any 2 cases in switches with 3
10102     // cases.
10103     // TODO: Handle cases where W.CaseBB != SwitchBB.
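    // Worked example for the transform above: with X == 6 (0b110) and
    // X == 4 (0b100), CommonBit = 6 ^ 4 = 2 is a power of two, so
    // (X | 2) == (6 | 4) == 6 tests both cases with a single compare.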
10104     CaseCluster &Small = *W.FirstCluster;
10105     CaseCluster &Big = *W.LastCluster;
10106 
10107     if (Small.Low == Small.High && Big.Low == Big.High &&
10108         Small.MBB == Big.MBB) {
10109       const APInt &SmallValue = Small.Low->getValue();
10110       const APInt &BigValue = Big.Low->getValue();
10111 
10112       // Check that there is only one bit different.
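      // For the example above, 6 ^ 4 == 2 is a power of two, so the values
      // differ in exactly one bit and (X | 2) == 6 covers both cases.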
10113       APInt CommonBit = BigValue ^ SmallValue;
10114       if (CommonBit.isPowerOf2()) {
10115         SDValue CondLHS = getValue(Cond);
10116         EVT VT = CondLHS.getValueType();
10117         SDLoc DL = getCurSDLoc();
10118 
10119         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
10120                                  DAG.getConstant(CommonBit, DL, VT));
10121         SDValue Cond = DAG.getSetCC(
10122             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
10123             ISD::SETEQ);
10124 
10125         // Update successor info.
10126         // Both Small and Big will jump to Small.BB, so we sum up the
10127         // probabilities.
10128         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
10129         if (BPI)
10130           addSuccessorWithProb(
10131               SwitchMBB, DefaultMBB,
10132               // The default destination is the first successor in IR.
10133               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
10134         else
10135           addSuccessorWithProb(SwitchMBB, DefaultMBB);
10136 
10137         // Insert the true branch.
10138         SDValue BrCond =
10139             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
10140                         DAG.getBasicBlock(Small.MBB));
10141         // Insert the false branch.
10142         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
10143                              DAG.getBasicBlock(DefaultMBB));
10144 
10145         DAG.setRoot(BrCond);
10146         return;
10147       }
10148     }
10149   }
10150 
10151   if (TM.getOptLevel() != CodeGenOpt::None) {
10152     // Here, we order cases by probability so the most likely case will be
10153     // checked first. However, two clusters can have the same probability in
10154     // which case their relative ordering is non-deterministic. So we use Low
10155     // as a tie-breaker as clusters are guaranteed to never overlap.
10156     llvm::sort(W.FirstCluster, W.LastCluster + 1,
10157                [](const CaseCluster &a, const CaseCluster &b) {
10158       return a.Prob != b.Prob ?
10159              a.Prob > b.Prob :
10160              a.Low->getValue().slt(b.Low->getValue());
10161     });
10162 
10163     // Rearrange the case blocks so that the last one falls through if possible
10164     // without changing the order of probabilities.
10165     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
10166       --I;
10167       if (I->Prob > W.LastCluster->Prob)
10168         break;
10169       if (I->Kind == CC_Range && I->MBB == NextMBB) {
10170         std::swap(*I, *W.LastCluster);
10171         break;
10172       }
10173     }
10174   }
10175 
10176   // Compute total probability.
10177   BranchProbability DefaultProb = W.DefaultProb;
10178   BranchProbability UnhandledProbs = DefaultProb;
10179   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
10180     UnhandledProbs += I->Prob;
10181 
10182   MachineBasicBlock *CurMBB = W.MBB;
10183   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
10184     bool FallthroughUnreachable = false;
10185     MachineBasicBlock *Fallthrough;
10186     if (I == W.LastCluster) {
10187       // For the last cluster, fall through to the default destination.
10188       Fallthrough = DefaultMBB;
10189       FallthroughUnreachable = isa<UnreachableInst>(
10190           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
10191     } else {
10192       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
10193       CurMF->insert(BBI, Fallthrough);
10194       // Put Cond in a virtual register to make it available from the new blocks.
10195       ExportFromCurrentBlock(Cond);
10196     }
10197     UnhandledProbs -= I->Prob;
10198 
10199     switch (I->Kind) {
10200       case CC_JumpTable: {
10201         // FIXME: Optimize away range check based on pivot comparisons.
10202         JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
10203         SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
10204 
10205         // The jump block hasn't been inserted yet; insert it here.
10206         MachineBasicBlock *JumpMBB = JT->MBB;
10207         CurMF->insert(BBI, JumpMBB);
10208 
10209         auto JumpProb = I->Prob;
10210         auto FallthroughProb = UnhandledProbs;
10211 
10212         // If the default statement is a target of the jump table, we evenly
10213         // distribute the default probability to successors of CurMBB. Also
10214         // update the probability on the edge from JumpMBB to Fallthrough.
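        // (Concretely: half of DefaultProb is moved from CurMBB's fallthrough
        // edge to its jump-table edge, and the edge from JumpMBB to DefaultMBB
        // is set to the other half.)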
10215         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
10216                                               SE = JumpMBB->succ_end();
10217              SI != SE; ++SI) {
10218           if (*SI == DefaultMBB) {
10219             JumpProb += DefaultProb / 2;
10220             FallthroughProb -= DefaultProb / 2;
10221             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
10222             JumpMBB->normalizeSuccProbs();
10223             break;
10224           }
10225         }
10226 
10227         if (FallthroughUnreachable) {
10228           // Skip the range check if the fallthrough block is unreachable.
10229           JTH->OmitRangeCheck = true;
10230         }
10231 
10232         if (!JTH->OmitRangeCheck)
10233           addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
10234         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
10235         CurMBB->normalizeSuccProbs();
10236 
10237         // The jump table header will be inserted into our current block; it will
10238         // do the range check and fall through to our fallthrough block.
10239         JTH->HeaderBB = CurMBB;
10240         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
10241 
10242         // If we're in the right place, emit the jump table header right now.
10243         if (CurMBB == SwitchMBB) {
10244           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
10245           JTH->Emitted = true;
10246         }
10247         break;
10248       }
10249       case CC_BitTests: {
10250         // FIXME: Optimize away range check based on pivot comparisons.
10251         BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
10252 
10253         // The bit test blocks haven't been inserted yet; insert them here.
10254         for (BitTestCase &BTC : BTB->Cases)
10255           CurMF->insert(BBI, BTC.ThisBB);
10256 
10257         // Fill in fields of the BitTestBlock.
10258         BTB->Parent = CurMBB;
10259         BTB->Default = Fallthrough;
10260 
10261         BTB->DefaultProb = UnhandledProbs;
10262         // If the cases in bit test don't form a contiguous range, we evenly
10263         // distribute the probability on the edge to Fallthrough to two
10264         // successors of CurMBB.
10265         if (!BTB->ContiguousRange) {
10266           BTB->Prob += DefaultProb / 2;
10267           BTB->DefaultProb -= DefaultProb / 2;
10268         }
10269 
10270         if (FallthroughUnreachable) {
10271           // Skip the range check if the fallthrough block is unreachable.
10272           BTB->OmitRangeCheck = true;
10273         }
10274 
10275         // If we're in the right place, emit the bit test header right now.
10276         if (CurMBB == SwitchMBB) {
10277           visitBitTestHeader(*BTB, SwitchMBB);
10278           BTB->Emitted = true;
10279         }
10280         break;
10281       }
10282       case CC_Range: {
10283         const Value *RHS, *LHS, *MHS;
10284         ISD::CondCode CC;
10285         if (I->Low == I->High) {
10286           // Check Cond == I->Low.
10287           CC = ISD::SETEQ;
10288           LHS = Cond;
10289           RHS = I->Low;
10290           MHS = nullptr;
10291         } else {
10292           // Check I->Low <= Cond <= I->High.
10293           CC = ISD::SETLE;
10294           LHS = I->Low;
10295           MHS = Cond;
10296           RHS = I->High;
10297         }
10298 
10299         // If Fallthrough is unreachable, fold away the comparison.
10300         if (FallthroughUnreachable)
10301           CC = ISD::SETTRUE;
10302 
10303         // The false probability is the sum of all unhandled cases.
10304         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
10305                      getCurSDLoc(), I->Prob, UnhandledProbs);
10306 
10307         if (CurMBB == SwitchMBB)
10308           visitSwitchCase(CB, SwitchMBB);
10309         else
10310           SL->SwitchCases.push_back(CB);
10311 
10312         break;
10313       }
10314     }
10315     CurMBB = Fallthrough;
10316   }
10317 }
10318 
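/// Return the rank of CC within [First, Last], i.e. the number of clusters in
/// that range that are ordered before CC by descending probability, using the
/// case value as a tie-breaker.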
10319 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
10320                                               CaseClusterIt First,
10321                                               CaseClusterIt Last) {
10322   return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
10323     if (X.Prob != CC.Prob)
10324       return X.Prob > CC.Prob;
10325 
10326     // Ties are broken by comparing the case value.
10327     return X.Low->getValue().slt(CC.Low->getValue());
10328   });
10329 }
10330 
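/// Split the cluster range in W around a pivot chosen to balance branch
/// probabilities. Halves that cannot branch directly to a single destination
/// are pushed onto WorkList, and a "Cond < Pivot" case block selects between
/// the two sides.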
10331 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
10332                                         const SwitchWorkListItem &W,
10333                                         Value *Cond,
10334                                         MachineBasicBlock *SwitchMBB) {
10335   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
10336          "Clusters not sorted?");
10337 
10338   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
10339 
10340   // Balance the tree based on branch probabilities to create a near-optimal (in
10341   // terms of search time given key frequency) binary search tree. See e.g. Kurt
10342   // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
10343   CaseClusterIt LastLeft = W.FirstCluster;
10344   CaseClusterIt FirstRight = W.LastCluster;
10345   auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
10346   auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
10347 
10348   // Move LastLeft and FirstRight towards each other from opposite directions to
10349   // find a partitioning of the clusters which balances the probability on both
10350   // sides. If LeftProb and RightProb are equal, alternate which side is
10351   // taken to ensure 0-probability nodes are distributed evenly.
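  // For instance, with cluster probabilities {4, 1, 1, 4} and zero default
  // probability, the loop below settles on the partition {4, 1} | {1, 4}.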
10352   unsigned I = 0;
10353   while (LastLeft + 1 < FirstRight) {
10354     if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
10355       LeftProb += (++LastLeft)->Prob;
10356     else
10357       RightProb += (--FirstRight)->Prob;
10358     I++;
10359   }
10360 
10361   while (true) {
10362     // Our binary search tree differs from a typical BST in that ours can have up
10363     // to three values in each leaf. The pivot selection above doesn't take that
10364     // into account, which means the tree might require more nodes and be less
10365     // efficient. We compensate for this here.
10366 
10367     unsigned NumLeft = LastLeft - W.FirstCluster + 1;
10368     unsigned NumRight = W.LastCluster - FirstRight + 1;
10369 
10370     if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
10371       // If one side has fewer than 3 clusters and the other has more than 3,
10372       // consider taking a cluster from the other side.
10373 
10374       if (NumLeft < NumRight) {
10375         // Consider moving the first cluster on the right to the left side.
10376         CaseCluster &CC = *FirstRight;
10377         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10378         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10379         if (LeftSideRank <= RightSideRank) {
10380           // Moving the cluster to the left does not demote it.
10381           ++LastLeft;
10382           ++FirstRight;
10383           continue;
10384         }
10385       } else {
10386         assert(NumRight < NumLeft);
10387         // Consider moving the last element on the left to the right side.
10388         CaseCluster &CC = *LastLeft;
10389         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10390         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10391         if (RightSideRank <= LeftSideRank) {
10392           // Moving the cluster to the right does not demote it.
10393           --LastLeft;
10394           --FirstRight;
10395           continue;
10396         }
10397       }
10398     }
10399     break;
10400   }
10401 
10402   assert(LastLeft + 1 == FirstRight);
10403   assert(LastLeft >= W.FirstCluster);
10404   assert(FirstRight <= W.LastCluster);
10405 
10406   // Use the first element on the right as pivot since we will make less-than
10407   // comparisons against it.
10408   CaseClusterIt PivotCluster = FirstRight;
10409   assert(PivotCluster > W.FirstCluster);
10410   assert(PivotCluster <= W.LastCluster);
10411 
10412   CaseClusterIt FirstLeft = W.FirstCluster;
10413   CaseClusterIt LastRight = W.LastCluster;
10414 
10415   const ConstantInt *Pivot = PivotCluster->Low;
10416 
10417   // New blocks will be inserted immediately after the current one.
10418   MachineFunction::iterator BBI(W.MBB);
10419   ++BBI;
10420 
10421   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
10422   // we can branch to its destination directly if it's squeezed exactly in
10423   // between the known lower bound and Pivot - 1.
10424   MachineBasicBlock *LeftMBB;
10425   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
10426       FirstLeft->Low == W.GE &&
10427       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
10428     LeftMBB = FirstLeft->MBB;
10429   } else {
10430     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10431     FuncInfo.MF->insert(BBI, LeftMBB);
10432     WorkList.push_back(
10433         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
10434     // Put Cond in a virtual register to make it available from the new blocks.
10435     ExportFromCurrentBlock(Cond);
10436   }
10437 
10438   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
10439   // single cluster, RHS.Low == Pivot, and we can branch to its destination
10440   // directly if RHS.High equals the current upper bound.
10441   MachineBasicBlock *RightMBB;
10442   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
10443       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
10444     RightMBB = FirstRight->MBB;
10445   } else {
10446     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10447     FuncInfo.MF->insert(BBI, RightMBB);
10448     WorkList.push_back(
10449         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
10450     // Put Cond in a virtual register to make it available from the new blocks.
10451     ExportFromCurrentBlock(Cond);
10452   }
10453 
10454   // Create the CaseBlock record that will be used to lower the branch.
10455   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
10456                getCurSDLoc(), LeftProb, RightProb);
10457 
10458   if (W.MBB == SwitchMBB)
10459     visitSwitchCase(CB, SwitchMBB);
10460   else
10461     SL->SwitchCases.push_back(CB);
10462 }
10463 
10464 // Scale CaseProb after peeling a case with the probability of PeeledCaseProb
10465 // from the switch statement.
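// For example, peeling a case with probability 1/2 rescales a remaining case
// of probability 1/4 to (1/4) / (1 - 1/2) = 1/2; the denominator is clamped
// so the result never exceeds 1.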
10466 static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
10467                                             BranchProbability PeeledCaseProb) {
10468   if (PeeledCaseProb == BranchProbability::getOne())
10469     return BranchProbability::getZero();
10470   BranchProbability SwitchProb = PeeledCaseProb.getCompl();
10471 
10472   uint32_t Numerator = CaseProb.getNumerator();
10473   uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
10474   return BranchProbability(Numerator, std::max(Numerator, Denominator));
10475 }
10476 
10477 // Try to peel the top probability case if it exceeds the threshold.
10478 // Return current MachineBasicBlock for the switch statement if the peeling
10479 // does not occur.
10480 // If the peeling is performed, return the newly created MachineBasicBlock
10481 // for the peeled switch statement. Also update Clusters to remove the peeled
10482 // case. PeeledCaseProb is the BranchProbability for the peeled case.
10483 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
10484     const SwitchInst &SI, CaseClusterVector &Clusters,
10485     BranchProbability &PeeledCaseProb) {
10486   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
10487   // Don't peel if peeling is disabled, there is no profile info, there is only one cluster, or we are optimizing for size.
10488   if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
10489       TM.getOptLevel() == CodeGenOpt::None ||
10490       SwitchMBB->getParent()->getFunction().hasMinSize())
10491     return SwitchMBB;
10492 
10493   BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
10494   unsigned PeeledCaseIndex = 0;
10495   bool SwitchPeeled = false;
10496   for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
10497     CaseCluster &CC = Clusters[Index];
10498     if (CC.Prob < TopCaseProb)
10499       continue;
10500     TopCaseProb = CC.Prob;
10501     PeeledCaseIndex = Index;
10502     SwitchPeeled = true;
10503   }
10504   if (!SwitchPeeled)
10505     return SwitchMBB;
10506 
10507   LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
10508                     << TopCaseProb << "\n");
10509 
10510   // Record the MBB for the peeled switch statement.
10511   MachineFunction::iterator BBI(SwitchMBB);
10512   ++BBI;
10513   MachineBasicBlock *PeeledSwitchMBB =
10514       FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
10515   FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
10516 
10517   ExportFromCurrentBlock(SI.getCondition());
10518   auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
10519   SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
10520                           nullptr,   nullptr,      TopCaseProb.getCompl()};
10521   lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
10522 
10523   Clusters.erase(PeeledCaseIt);
10524   for (CaseCluster &CC : Clusters) {
10525     LLVM_DEBUG(
10526         dbgs() << "Scale the probability for one cluster, before scaling: "
10527                << CC.Prob << "\n");
10528     CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
10529     LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
10530   }
10531   PeeledCaseProb = TopCaseProb;
10532   return PeeledSwitchMBB;
10533 }
10534 
10535 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
10536   // Extract cases from the switch.
10537   BranchProbabilityInfo *BPI = FuncInfo.BPI;
10538   CaseClusterVector Clusters;
10539   Clusters.reserve(SI.getNumCases());
10540   for (auto I : SI.cases()) {
10541     MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
10542     const ConstantInt *CaseVal = I.getCaseValue();
10543     BranchProbability Prob =
10544         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
10545             : BranchProbability(1, SI.getNumCases() + 1);
10546     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
10547   }
10548 
10549   MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
10550 
10551   // Cluster adjacent cases with the same destination. We do this at all
10552   // optimization levels because it's cheap to do and will make codegen faster
10553   // if there are many clusters.
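  // For instance, cases 1, 2 and 3 that all branch to the same successor are
  // merged into a single [1, 3] range cluster.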
10554   sortAndRangeify(Clusters);
10555 
10556   // The branch probability of the peeled case.
10557   BranchProbability PeeledCaseProb = BranchProbability::getZero();
10558   MachineBasicBlock *PeeledSwitchMBB =
10559       peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
10560 
10561   // If there is only the default destination, jump there directly.
10562   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
10563   if (Clusters.empty()) {
10564     assert(PeeledSwitchMBB == SwitchMBB);
10565     SwitchMBB->addSuccessor(DefaultMBB);
10566     if (DefaultMBB != NextBlock(SwitchMBB)) {
10567       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
10568                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
10569     }
10570     return;
10571   }
10572 
10573   SL->findJumpTables(Clusters, &SI, DefaultMBB, DAG.getPSI(), DAG.getBFI());
10574   SL->findBitTestClusters(Clusters, &SI);
10575 
10576   LLVM_DEBUG({
10577     dbgs() << "Case clusters: ";
10578     for (const CaseCluster &C : Clusters) {
10579       if (C.Kind == CC_JumpTable)
10580         dbgs() << "JT:";
10581       if (C.Kind == CC_BitTests)
10582         dbgs() << "BT:";
10583 
10584       C.Low->getValue().print(dbgs(), true);
10585       if (C.Low != C.High) {
10586         dbgs() << '-';
10587         C.High->getValue().print(dbgs(), true);
10588       }
10589       dbgs() << ' ';
10590     }
10591     dbgs() << '\n';
10592   });
10593 
10594   assert(!Clusters.empty());
10595   SwitchWorkList WorkList;
10596   CaseClusterIt First = Clusters.begin();
10597   CaseClusterIt Last = Clusters.end() - 1;
10598   auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
10599   // Scale the branch probability for DefaultMBB if peeling occurred and
10600   // DefaultMBB has not been replaced.
10601   if (PeeledCaseProb != BranchProbability::getZero() &&
10602       DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
10603     DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
10604   WorkList.push_back(
10605       {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
10606 
10607   while (!WorkList.empty()) {
10608     SwitchWorkListItem W = WorkList.back();
10609     WorkList.pop_back();
10610     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
10611 
10612     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
10613         !DefaultMBB->getParent()->getFunction().hasMinSize()) {
10614       // For optimized builds, lower large range as a balanced binary tree.
10615       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
10616       continue;
10617     }
10618 
10619     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
10620   }
10621 }
10622 
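/// Lower a freeze instruction: emit an ISD::FREEZE node for each value
/// component of the operand and merge the results back together.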
10623 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
10624   SmallVector<EVT, 4> ValueVTs;
10625   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
10626                   ValueVTs);
10627   unsigned NumValues = ValueVTs.size();
10628   if (NumValues == 0) return;
10629 
10630   SmallVector<SDValue, 4> Values(NumValues);
10631   SDValue Op = getValue(I.getOperand(0));
10632 
10633   for (unsigned i = 0; i != NumValues; ++i)
10634     Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
10635                             SDValue(Op.getNode(), Op.getResNo() + i));
10636 
10637   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
10638                            DAG.getVTList(ValueVTs), Values));
10639 }
10640