//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <numeric>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will disable this "
             "optimization"));
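
// Illustrative note (not from the original source): with the default
// threshold of 66, a switch in which one case carries, say, 70% of the
// branch probability is peeled into an explicit compare-and-branch before
// the rest of the switch is lowered, conceptually turning
//   switch (x) { case 0: ... /* 70% likely */ ... }
// into
//   if (x == 0) { ... } else { /* switch on the remaining cases */ }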

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

// Return the calling convention if the Value passed requires ABI mangling,
// i.e. it is a parameter to a function or a return value from a function
// that is not an intrinsic.
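// For example (illustrative): for the return value of a direct call such as
//   %r = call fastcc i32 @f(i32 %a)
// this returns CallingConv::Fast, whereas for inline asm or a call to an
// intrinsic such as llvm.sqrt it returns None.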
static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
  if (auto *R = dyn_cast<ReturnInst>(V))
    return R->getParent()->getParent()->getCallingConv();

  if (auto *CI = dyn_cast<CallInst>(V)) {
    const bool IsInlineAsm = CI->isInlineAsm();
    const bool IsIndirectFunctionCall =
        !IsInlineAsm && !CI->getCalledFunction();

    // It is possible that the call instruction is an inline asm statement or an
    // indirect function call in which case the return value of
    // getCalledFunction() would be nullptr.
    const bool IsIntrinsicCall =
        !IsInlineAsm && !IsIndirectFunctionCall &&
        CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;

    if (!IsInlineAsm && !IsIntrinsicCall)
      return CI->getCallingConv();
  }

  return None;
}

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
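/// A worked example (illustrative): assembling ValueVT = i64 from
/// NumParts = 2 parts of PartVT = i32 combines the parts with
/// BUILD_PAIR(i64, Lo, Hi), swapping Lo and Hi on big-endian targets;
/// AssertOp only comes into play when a single part is wider than ValueVT
/// and has to be truncated.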
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                const SDValue *Parts, unsigned NumParts,
                                MVT PartVT, EVT ValueVT, const Value *V,
                                Optional<CallingConv::ID> CC = None,
                                Optional<ISD::NodeType> AssertOp = None) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  CC);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts =
          (NumParts & (NumParts - 1)) ? 1 << Log2_32(NumParts) : NumParts;
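      // For example (illustrative): NumParts = 3 is not a power of 2, so
      // RoundParts = 2; the first two parts are assembled below and the
      // remaining odd part is merged in afterwards.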
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm())
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.hasValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        IntermediateVT.isVector()
            ? EVT::getVectorVT(
                  *DAG.getContext(), IntermediateVT.getScalarType(),
                  IntermediateVT.getVectorElementCount() * NumParts)
            : EVT::getVectorVT(*DAG.getContext(),
                               IntermediateVT.getScalarType(),
                               NumIntermediates);
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element types of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert((PartEVT.getVectorElementCount().Min >
              ValueVT.getVectorElementCount().Min) &&
             (PartEVT.getVectorElementCount().Scalable ==
              ValueVT.getVectorElementCount().Scalable) &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                         DAG.getVectorIdxConstant(0, DL));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorElementCount() == ValueVT.getVectorElementCount() &&
      "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
     // Certain ABIs require that vectors are passed as integers. If the
     // vectors are the same size, this is an obvious bitcast.
     if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
       return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
     } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
       // Bitcast Val back to the original type and extract the corresponding
       // vector we want.
       unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
       EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
                                           ValueVT.getVectorElementType(), Elts);
       Val = DAG.getBitcast(WiderVecType, Val);
       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                          DAG.getVectorIdxConstant(0, DL));
     }

     diagnosePossiblyInvalidConstraint(
         *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
     return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
    if (ValueSVT.getSizeInBits() == PartEVT.getSizeInBits())
      Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
    else
      Val = ValueVT.isFloatingPoint()
                ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
  }

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
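/// A worked example (illustrative): copying an i48 value into NumParts = 2
/// parts of PartVT = i32 first extends the value to i64 using ExtendKind,
/// then bisects it with EXTRACT_ELEMENT into two i32 parts (reversed on
/// big-endian targets).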
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           Optional<CallingConv::ID> CallConv = None,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
      DAG.getShiftAmountConstant(RoundBits, ValueVT, DL, /*LegalTypes*/false));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
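  // For example (illustrative): an i128 value split into four i32 parts is
  // first halved into two i64 values, each of which is then halved again
  // into two i32 parts.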
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

static SDValue widenVectorToPartType(SelectionDAG &DAG,
                                     SDValue Val, const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  unsigned PartNumElts = PartVT.getVectorNumElements();
  unsigned ValueNumElts = ValueVT.getVectorNumElements();
  if (PartNumElts > ValueNumElts &&
      PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
    EVT ElementVT = PartVT.getVectorElementType();
    // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
    // undef elements.
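    // For example (illustrative): widening <2 x float> to a <4 x float> part
    // yields BUILD_VECTOR(elt0, elt1, undef, undef).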
    SmallVector<SDValue, 16> Ops;
    DAG.ExtractVectorElements(Val, Ops);
    SDValue EltUndef = DAG.getUNDEF(ElementVT);
    for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
      Ops.push_back(EltUndef);

    // FIXME: Use CONCAT for 2x -> 4x.
    return DAG.getBuildVector(PartVT, DL, Ops);
  }

  return SDValue();
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
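/// A sketch of one case (illustrative): a <4 x i32> value copied into two
/// <2 x i32> registers is split with EXTRACT_SUBVECTOR at indices 0 and 2,
/// and each half is then copied to its part with getCopyToParts.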
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.hasValue();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                 ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      if (ValueVT.getVectorNumElements() == 1) {
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
                          DAG.getVectorIdxConstant(0, DL));
      } else {
        assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType =
            EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  unsigned IntermediateNumElts = IntermediateVT.isVector() ?
    IntermediateVT.getVectorNumElements() : 1;

  // Convert the vector to the appropriate type if necessary.
  auto DestEltCnt = ElementCount(NumIntermediates * IntermediateNumElts,
                                 ValueVT.isScalableVector());
  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), DestEltCnt);
  if (ValueVT != BuiltVectorTy) {
    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
      Val = Widened;

    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  }

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
    } else {
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
                           DAG.getVectorIdxConstant(i, DL));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each intermediate operand
    // into legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, Optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           Optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}
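
// For example (illustrative): on a target with 64-bit GPRs, an i128 value
// gives NumRegs = 2 with RegisterVT = i64, so Regs receives two consecutive
// register numbers starting at Reg.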

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is zero. Express that explicitly, since it makes
        // it easier for later optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the DAG can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind_Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<unsigned, unsigned>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    unsigned RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               const TargetLibraryInfo *li) {
  AA = aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add the current root to Pending, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}

SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
    // ConstrainedFPIntrinsics handle their own FMF.
    if (!isa<ConstrainedFPIntrinsic>(&I)) {
      // Propagate the fast-math-flags of this IR instruction to the DAG node
      // that maps to this instruction.
      // TODO: We could handle all flags (nsw, etc) here.
      // TODO: If an IR instruction maps to >1 node, only the final node will
      //       have flags set.
      if (SDNode *Node = getNodeForIRValue(&I)) {
        SDNodeFlags IncomingFlags;
        IncomingFlags.copyFMF(*FPMO);
        if (!Node->getFlags().isDefined())
          Node->setFlags(IncomingFlags);
        else
          Node->intersectFlagsWith(IncomingFlags);
      }
    }
  }

  if (!I.isTerminator() && !HasTailCall &&
      !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
                                                const DIExpression *Expr) {
  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    const DbgValueInst *DI = DDI.getDI();
    DIVariable *DanglingVariable = DI->getVariable();
    DIExpression *DanglingExpr = DI->getExpression();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
      return true;
    }
    return false;
  };

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;

    // If debug info is to be dropped, run it through final checks to see
    // whether it can be salvaged.
    for (auto &DDI : DDIV)
      if (isMatchingDbgValue(DDI))
        salvageUnresolvedDbgValue(DDI);

    DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
    return;

  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    const DbgValueInst *DI = DDI.getDI();
    assert(DI && "Ill-formed DanglingDebugInfo");
    DebugLoc dl = DDI.getdl();
    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
      // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
      // we couldn't resolve it directly when examining the DbgValue intrinsic
      // in the first place we should not be more successful here). Unless we
      // have some test case that proves this to be correct we should avoid
      // calling EmitFuncArgumentDbgValue here.
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
        LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
                          << DbgSDNodeOrder << "] for:\n  " << *DI << "\n");
        LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
        // Increase the SDNodeOrder for the DbgValue here to make sure it is
        // inserted after the definition of Val when emitting the instructions
        // after ISel. An alternative could be to teach
        // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
        LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
                   << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                   << ValSDNodeOrder << "\n");
        SDV = getDbgValue(Val, Variable, Expr, dl,
                          std::max(DbgSDNodeOrder, ValSDNodeOrder));
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      } else
        LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
                          << " in EmitFuncArgumentDbgValue\n");
    } else {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      auto Undef =
          UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
      auto SDV =
          DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
      DAG.AddDbgValue(SDV, nullptr, false);
    }
  }
  DDIV.clear();
}

void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
  Value *V = DDI.getDI()->getValue();
  DILocalVariable *Var = DDI.getDI()->getVariable();
  DIExpression *Expr = DDI.getDI()->getExpression();
  DebugLoc DL = DDI.getdl();
  DebugLoc InstDL = DDI.getDI()->getDebugLoc();
  unsigned SDOrder = DDI.getSDNodeOrder();

  // Currently we consider only dbg.value intrinsics -- we tell the salvager
  // that DW_OP_stack_value is desired.
  assert(isa<DbgValueInst>(DDI.getDI()));
  bool StackValue = true;

  // Can this Value be encoded without any further work?
  if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder))
    return;

  // Attempt to salvage back through as many instructions as possible. Bail if
  // a non-instruction is seen, such as a constant expression or global
  // variable. FIXME: Further work could recover those too.
  while (isa<Instruction>(V)) {
    Instruction &VAsInst = *cast<Instruction>(V);
    DIExpression *NewExpr = salvageDebugInfoImpl(VAsInst, Expr, StackValue);

    // If we cannot salvage any further, and haven't yet found a suitable debug
    // expression, bail out.
    if (!NewExpr)
      break;

    // New value and expr now represent this debuginfo.
    V = VAsInst.getOperand(0);
    Expr = NewExpr;

    // Some kind of simplification occurred: check whether the operand of the
    // salvaged debug expression can be encoded in this DAG.
    if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder)) {
      LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n  "
                        << *DDI.getDI() << "\nBy stripping back to:\n  " << *V);
      return;
    }
  }

  // This was the final opportunity to salvage this debug information, and it
  // couldn't be done. Place an undef DBG_VALUE at this location to terminate
  // any earlier variable location.
  auto Undef = UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
  auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
  DAG.AddDbgValue(SDV, nullptr, false);

1293   LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  " << *DDI.getDI()
1294                     << "\n");
1295   LLVM_DEBUG(dbgs() << "  Last seen at:\n    " << *DDI.getDI()->getOperand(0)
1296                     << "\n");
1297 }
1298 
1299 bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
1300                                            DIExpression *Expr, DebugLoc dl,
1301                                            DebugLoc InstDL, unsigned Order) {
1302   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1303   SDDbgValue *SDV;
1304   if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1305       isa<ConstantPointerNull>(V)) {
1306     SDV = DAG.getConstantDbgValue(Var, Expr, V, dl, SDNodeOrder);
1307     DAG.AddDbgValue(SDV, nullptr, false);
1308     return true;
1309   }
1310 
1311   // If the Value is a frame index, we can create a FrameIndex debug value
1312   // without relying on the DAG at all.
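       // e.g. a dbg.value describing a static "%buf = alloca [16 x i8]" can be
       // lowered to a frame-index DBG_VALUE immediately (illustrative IR); the
       // location stays valid even if no SDNode for %buf is ever built.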
1313   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1314     auto SI = FuncInfo.StaticAllocaMap.find(AI);
1315     if (SI != FuncInfo.StaticAllocaMap.end()) {
1316       auto SDV =
1317           DAG.getFrameIndexDbgValue(Var, Expr, SI->second,
1318                                     /*IsIndirect*/ false, dl, SDNodeOrder);
1319       // Do not attach the SDNodeDbgValue to an SDNode: this variable location
1320       // is still available even if the SDNode gets optimized out.
1321       DAG.AddDbgValue(SDV, nullptr, false);
1322       return true;
1323     }
1324   }
1325 
1326   // Do not use getValue() in here; we don't want to generate code at
1327   // this point if it hasn't been done yet.
1328   SDValue N = NodeMap[V];
1329   if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1330     N = UnusedArgNodeMap[V];
1331   if (N.getNode()) {
1332     if (EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
1333       return true;
1334     SDV = getDbgValue(N, Var, Expr, dl, SDNodeOrder);
1335     DAG.AddDbgValue(SDV, N.getNode(), false);
1336     return true;
1337   }
1338 
1339   // Special rules apply for the first dbg.values of parameter variables in a
1340   // function. Identify them by the fact that they reference Argument Values,
1341   // that the variable is a parameter, and that the debug location is not
1342   // inlined. We need to let them dangle until they get an SDNode.
1343   bool IsParamOfFunc = isa<Argument>(V) && Var->isParameter() &&
1344                        !InstDL.getInlinedAt();
1345   if (!IsParamOfFunc) {
1346     // The value is not used in this block yet (or it would have an SDNode).
1347     // We still want the value to appear for the user if possible -- if it has
1348     // an associated VReg, we can refer to that instead.
1349     auto VMI = FuncInfo.ValueMap.find(V);
1350     if (VMI != FuncInfo.ValueMap.end()) {
1351       unsigned Reg = VMI->second;
1352       // If this is a PHI node, it may be split up into several MI PHI nodes
1353       // (in FunctionLoweringInfo::set).
1354       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1355                        V->getType(), None);
1356       if (RFV.occupiesMultipleRegs()) {
1357         unsigned Offset = 0;
1358         unsigned BitsToDescribe = 0;
1359         if (auto VarSize = Var->getSizeInBits())
1360           BitsToDescribe = *VarSize;
1361         if (auto Fragment = Expr->getFragmentInfo())
1362           BitsToDescribe = Fragment->SizeInBits;
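             // Sketch: an i128 variable occupying two 64-bit vregs emits two
             // DBG_VALUEs whose DW_OP_LLVM_fragment expressions cover bits
             // [0, 64) and [64, 128) respectively.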
1363         for (auto RegAndSize : RFV.getRegsAndSizes()) {
1364           unsigned RegisterSize = RegAndSize.second;
1365           // Bail out if all bits are described already.
1366           if (Offset >= BitsToDescribe)
1367             break;
1368           unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1369               ? BitsToDescribe - Offset
1370               : RegisterSize;
1371           auto FragmentExpr = DIExpression::createFragmentExpression(
1372               Expr, Offset, FragmentSize);
1373           if (!FragmentExpr)
1374               continue;
1375           SDV = DAG.getVRegDbgValue(Var, *FragmentExpr, RegAndSize.first,
1376                                     false, dl, SDNodeOrder);
1377           DAG.AddDbgValue(SDV, nullptr, false);
1378           Offset += RegisterSize;
1379         }
1380       } else {
1381         SDV = DAG.getVRegDbgValue(Var, Expr, Reg, false, dl, SDNodeOrder);
1382         DAG.AddDbgValue(SDV, nullptr, false);
1383       }
1384       return true;
1385     }
1386   }
1387 
1388   return false;
1389 }
1390 
1391 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1392   // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1393   for (auto &Pair : DanglingDebugInfoMap)
1394     for (auto &DDI : Pair.second)
1395       salvageUnresolvedDbgValue(DDI);
1396   clearDanglingDebugInfo();
1397 }
1398 
1399 /// getCopyFromRegs - If there was a virtual register allocated for the value
1400 /// V, emit a CopyFromReg of the specified type Ty. Return an empty SDValue() otherwise.
1401 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1402   DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1403   SDValue Result;
1404 
1405   if (It != FuncInfo.ValueMap.end()) {
1406     Register InReg = It->second;
1407 
1408     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1409                      DAG.getDataLayout(), InReg, Ty,
1410                      None); // This is not an ABI copy.
1411     SDValue Chain = DAG.getEntryNode();
1412     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1413                                  V);
1414     resolveDanglingDebugInfo(V, Result);
1415   }
1416 
1417   return Result;
1418 }
1419 
1420 /// getValue - Return an SDValue for the given Value.
1421 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1422   // If we already have an SDValue for this value, use it. It's important
1423   // to do this first, so that we don't create a CopyFromReg if we already
1424   // have a regular SDValue.
1425   SDValue &N = NodeMap[V];
1426   if (N.getNode()) return N;
1427 
1428   // If there's a virtual register allocated and initialized for this
1429   // value, use it.
1430   if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1431     return copyFromReg;
1432 
1433   // Otherwise create a new SDValue and remember it.
1434   SDValue Val = getValueImpl(V);
1435   NodeMap[V] = Val;
1436   resolveDanglingDebugInfo(V, Val);
1437   return Val;
1438 }
1439 
1440 /// getNonRegisterValue - Return an SDValue for the given Value, but
1441 /// don't look in FuncInfo.ValueMap for a virtual register.
1442 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1443   // If we already have an SDValue for this value, use it.
1444   SDValue &N = NodeMap[V];
1445   if (N.getNode()) {
1446     if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
1447       // Remove the debug location from the node as the node is about to be used
1448       // in a location which may differ from the original debug location.  This
1449       // is relevant to Constant and ConstantFP nodes because they can appear
1450       // as constant expressions inside PHI nodes.
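           // Sketch: PHIs in two different blocks may both use the single node
           // built for the constant "i32 7"; without this, the second use
           // would inherit the first use's source location.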
1451       N->setDebugLoc(DebugLoc());
1452     }
1453     return N;
1454   }
1455 
1456   // Otherwise create a new SDValue and remember it.
1457   SDValue Val = getValueImpl(V);
1458   NodeMap[V] = Val;
1459   resolveDanglingDebugInfo(V, Val);
1460   return Val;
1461 }
1462 
1463 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1464 /// Create an SDValue for the given value.
1465 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1466   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1467 
1468   if (const Constant *C = dyn_cast<Constant>(V)) {
1469     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1470 
1471     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1472       return DAG.getConstant(*CI, getCurSDLoc(), VT);
1473 
1474     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1475       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1476 
1477     if (isa<ConstantPointerNull>(C)) {
1478       unsigned AS = V->getType()->getPointerAddressSpace();
1479       return DAG.getConstant(0, getCurSDLoc(),
1480                              TLI.getPointerTy(DAG.getDataLayout(), AS));
1481     }
1482 
1483     if (match(C, m_VScale(DAG.getDataLayout())))
1484       return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1485 
1486     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1487       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1488 
1489     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1490       return DAG.getUNDEF(VT);
1491 
1492     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1493       visit(CE->getOpcode(), *CE);
1494       SDValue N1 = NodeMap[V];
1495       assert(N1.getNode() && "visit didn't populate the NodeMap!");
1496       return N1;
1497     }
1498 
1499     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1500       SmallVector<SDValue, 4> Constants;
1501       for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
1502            OI != OE; ++OI) {
1503         SDNode *Val = getValue(*OI).getNode();
1504         // If the operand is an empty aggregate, there are no values.
1505         if (!Val) continue;
1506         // Add each leaf value from the operand to the Constants list
1507         // to form a flattened list of all the values.
1508         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1509           Constants.push_back(SDValue(Val, i));
1510       }
1511 
1512       return DAG.getMergeValues(Constants, getCurSDLoc());
1513     }
1514 
1515     if (const ConstantDataSequential *CDS =
1516           dyn_cast<ConstantDataSequential>(C)) {
1517       SmallVector<SDValue, 4> Ops;
1518       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1519         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1520         // Add each leaf value from the operand to the Constants list
1521         // to form a flattened list of all the values.
1522         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1523           Ops.push_back(SDValue(Val, i));
1524       }
1525 
1526       if (isa<ArrayType>(CDS->getType()))
1527         return DAG.getMergeValues(Ops, getCurSDLoc());
1528       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1529     }
1530 
1531     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1532       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1533              "Unknown struct or array constant!");
1534 
1535       SmallVector<EVT, 4> ValueVTs;
1536       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1537       unsigned NumElts = ValueVTs.size();
1538       if (NumElts == 0)
1539         return SDValue(); // empty struct
1540       SmallVector<SDValue, 4> Constants(NumElts);
1541       for (unsigned i = 0; i != NumElts; ++i) {
1542         EVT EltVT = ValueVTs[i];
1543         if (isa<UndefValue>(C))
1544           Constants[i] = DAG.getUNDEF(EltVT);
1545         else if (EltVT.isFloatingPoint())
1546           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1547         else
1548           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1549       }
1550 
1551       return DAG.getMergeValues(Constants, getCurSDLoc());
1552     }
1553 
1554     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1555       return DAG.getBlockAddress(BA, VT);
1556 
1557     VectorType *VecTy = cast<VectorType>(V->getType());
1558 
1559     // Now that we know the number and type of the elements, get that number of
1560     // elements into the Ops array based on what kind of constant it is.
1561     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1562       SmallVector<SDValue, 16> Ops;
1563       unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1564       for (unsigned i = 0; i != NumElements; ++i)
1565         Ops.push_back(getValue(CV->getOperand(i)));
1566 
1567       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1568     } else if (isa<ConstantAggregateZero>(C)) {
1569       EVT EltVT =
1570           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1571 
1572       SDValue Op;
1573       if (EltVT.isFloatingPoint())
1574         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1575       else
1576         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1577 
1578       if (isa<ScalableVectorType>(VecTy))
1579         return NodeMap[V] = DAG.getSplatVector(VT, getCurSDLoc(), Op);
1580       else {
1581         SmallVector<SDValue, 16> Ops;
1582         Ops.assign(cast<FixedVectorType>(VecTy)->getNumElements(), Op);
1583         return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1584       }
1585     }
1586     llvm_unreachable("Unknown vector constant");
1587   }
1588 
1589   // If this is a static alloca, generate it as the frameindex instead of
1590   // computation.
1591   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1592     DenseMap<const AllocaInst*, int>::iterator SI =
1593       FuncInfo.StaticAllocaMap.find(AI);
1594     if (SI != FuncInfo.StaticAllocaMap.end())
1595       return DAG.getFrameIndex(SI->second,
1596                                TLI.getFrameIndexTy(DAG.getDataLayout()));
1597   }
1598 
1599   // If this is an instruction which fast-isel has deferred, select it now.
1600   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1601     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1602 
1603     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1604                      Inst->getType(), getABIRegCopyCC(V));
1605     SDValue Chain = DAG.getEntryNode();
1606     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1607   }
1608 
1609   if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V)) {
1610     return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1611   }
1612   llvm_unreachable("Can't get register for value!");
1613 }
1614 
1615 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1616   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1617   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1618   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1619   bool IsSEH = isAsynchronousEHPersonality(Pers);
1620   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1621   if (!IsSEH)
1622     CatchPadMBB->setIsEHScopeEntry();
1623   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1624   if (IsMSVCCXX || IsCoreCLR)
1625     CatchPadMBB->setIsEHFuncletEntry();
1626 }
1627 
1628 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1629   // Update machine-CFG edge.
1630   MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1631   FuncInfo.MBB->addSuccessor(TargetMBB);
1632 
1633   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1634   bool IsSEH = isAsynchronousEHPersonality(Pers);
1635   if (IsSEH) {
1636     // If this is not a fall-through branch or optimizations are switched off,
1637     // emit the branch.
1638     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1639         TM.getOptLevel() == CodeGenOpt::None)
1640       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1641                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1642     return;
1643   }
1644 
1645   // Figure out the funclet membership for the catchret's successor.
1646   // This will be used by the FuncletLayout pass to determine how to order the
1647   // BB's.
1648   // A 'catchret' returns to the outer scope's color.
1649   Value *ParentPad = I.getCatchSwitchParentPad();
1650   const BasicBlock *SuccessorColor;
1651   if (isa<ConstantTokenNone>(ParentPad))
1652     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1653   else
1654     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1655   assert(SuccessorColor && "No parent funclet for catchret!");
1656   MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1657   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1658 
1659   // Create the terminator node.
1660   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1661                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
1662                             DAG.getBasicBlock(SuccessorColorMBB));
1663   DAG.setRoot(Ret);
1664 }
1665 
1666 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1667   // Don't emit any special code for the cleanuppad instruction. It just marks
1668   // the start of an EH scope/funclet.
1669   FuncInfo.MBB->setIsEHScopeEntry();
1670   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1671   if (Pers != EHPersonality::Wasm_CXX) {
1672     FuncInfo.MBB->setIsEHFuncletEntry();
1673     FuncInfo.MBB->setIsCleanupFuncletEntry();
1674   }
1675 }
1676 
1677 // For wasm, there's always a single catch pad attached to a catchswitch, and
1678 // the control flow always stops at the single catch pad, as it does for a
1679 // cleanup pad. In case the exception caught is not of the types the catch pad
1680 // catches, it will be rethrown by a 'rethrow' instruction.
1681 static void findWasmUnwindDestinations(
1682     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1683     BranchProbability Prob,
1684     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1685         &UnwindDests) {
1686   while (EHPadBB) {
1687     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1688     if (isa<CleanupPadInst>(Pad)) {
1689       // Stop on cleanup pads.
1690       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1691       UnwindDests.back().first->setIsEHScopeEntry();
1692       break;
1693     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1694       // Add the catchpad handlers to the possible destinations. We don't
1695       // continue to the unwind destination of the catchswitch for wasm.
1696       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1697         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1698         UnwindDests.back().first->setIsEHScopeEntry();
1699       }
1700       break;
1701     } else {
1702       continue;
1703     }
1704   }
1705 }
1706 
1707 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
1708 /// many places it could ultimately go. In the IR, we have a single unwind
1709 /// destination, but in the machine CFG, we enumerate all the possible blocks.
1710 /// This function skips over imaginary basic blocks that hold catchswitch
1711 /// instructions, and finds all the "real" machine
1712 /// basic block destinations. As those destinations may not be successors of
1713 /// EHPadBB, here we also calculate the edge probability to those destinations.
1714 /// The passed-in Prob is the edge probability to EHPadBB.
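/// For example (illustrative): an invoke unwinding to a catchswitch with two
/// catchpad handlers contributes one entry per handler to UnwindDests, plus
/// whatever is reached by following the catchswitch's unwind destination
/// (e.g. a trailing cleanup pad), each with a suitably scaled probability.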
1715 static void findUnwindDestinations(
1716     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1717     BranchProbability Prob,
1718     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1719         &UnwindDests) {
1720   EHPersonality Personality =
1721     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1722   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1723   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1724   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1725   bool IsSEH = isAsynchronousEHPersonality(Personality);
1726 
1727   if (IsWasmCXX) {
1728     findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
1729     assert(UnwindDests.size() <= 1 &&
1730            "There should be at most one unwind destination for wasm");
1731     return;
1732   }
1733 
1734   while (EHPadBB) {
1735     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1736     BasicBlock *NewEHPadBB = nullptr;
1737     if (isa<LandingPadInst>(Pad)) {
1738       // Stop on landingpads. They are not funclets.
1739       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1740       break;
1741     } else if (isa<CleanupPadInst>(Pad)) {
1742       // Stop on cleanup pads. Cleanups are always funclet entries for all known
1743       // personalities.
1744       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1745       UnwindDests.back().first->setIsEHScopeEntry();
1746       UnwindDests.back().first->setIsEHFuncletEntry();
1747       break;
1748     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1749       // Add the catchpad handlers to the possible destinations.
1750       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1751         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1752         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1753         if (IsMSVCCXX || IsCoreCLR)
1754           UnwindDests.back().first->setIsEHFuncletEntry();
1755         if (!IsSEH)
1756           UnwindDests.back().first->setIsEHScopeEntry();
1757       }
1758       NewEHPadBB = CatchSwitch->getUnwindDest();
1759     } else {
1760       continue;
1761     }
1762 
1763     BranchProbabilityInfo *BPI = FuncInfo.BPI;
1764     if (BPI && NewEHPadBB)
1765       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1766     EHPadBB = NewEHPadBB;
1767   }
1768 }
1769 
1770 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1771   // Update successor info.
1772   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1773   auto UnwindDest = I.getUnwindDest();
1774   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1775   BranchProbability UnwindDestProb =
1776       (BPI && UnwindDest)
1777           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1778           : BranchProbability::getZero();
1779   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1780   for (auto &UnwindDest : UnwindDests) {
1781     UnwindDest.first->setIsEHPad();
1782     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1783   }
1784   FuncInfo.MBB->normalizeSuccProbs();
1785 
1786   // Create the terminator node.
1787   SDValue Ret =
1788       DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1789   DAG.setRoot(Ret);
1790 }
1791 
1792 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1793   report_fatal_error("visitCatchSwitch not yet implemented!");
1794 }
1795 
1796 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1797   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1798   auto &DL = DAG.getDataLayout();
1799   SDValue Chain = getControlRoot();
1800   SmallVector<ISD::OutputArg, 8> Outs;
1801   SmallVector<SDValue, 8> OutVals;
1802 
1803   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1804   // lower
1805   //
1806   //   %val = call <ty> @llvm.experimental.deoptimize()
1807   //   ret <ty> %val
1808   //
1809   // differently.
1810   if (I.getParent()->getTerminatingDeoptimizeCall()) {
1811     LowerDeoptimizingReturn();
1812     return;
1813   }
1814 
1815   if (!FuncInfo.CanLowerReturn) {
1816     unsigned DemoteReg = FuncInfo.DemoteRegister;
1817     const Function *F = I.getParent()->getParent();
1818 
1819     // Emit a store of the return value through the virtual register.
1820     // Leave Outs empty so that LowerReturn won't try to load return
1821     // registers the usual way.
1822     SmallVector<EVT, 1> PtrValueVTs;
1823     ComputeValueVTs(TLI, DL,
1824                     F->getReturnType()->getPointerTo(
1825                         DAG.getDataLayout().getAllocaAddrSpace()),
1826                     PtrValueVTs);
1827 
1828     SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1829                                         DemoteReg, PtrValueVTs[0]);
1830     SDValue RetOp = getValue(I.getOperand(0));
1831 
1832     SmallVector<EVT, 4> ValueVTs, MemVTs;
1833     SmallVector<uint64_t, 4> Offsets;
1834     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
1835                     &Offsets);
1836     unsigned NumValues = ValueVTs.size();
1837 
1838     SmallVector<SDValue, 4> Chains(NumValues);
1839     Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
1840     for (unsigned i = 0; i != NumValues; ++i) {
1841       // An aggregate return value cannot wrap around the address space, so
1842       // offsets to its parts don't wrap either.
1843       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);
1844 
1845       SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
1846       if (MemVTs[i] != ValueVTs[i])
1847         Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
1848       Chains[i] = DAG.getStore(
1849           Chain, getCurSDLoc(), Val,
1850           // FIXME: better loc info would be nice.
1851           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
1852           commonAlignment(BaseAlign, Offsets[i]));
1853     }
1854 
1855     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1856                         MVT::Other, Chains);
1857   } else if (I.getNumOperands() != 0) {
1858     SmallVector<EVT, 4> ValueVTs;
1859     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1860     unsigned NumValues = ValueVTs.size();
1861     if (NumValues) {
1862       SDValue RetOp = getValue(I.getOperand(0));
1863 
1864       const Function *F = I.getParent()->getParent();
1865 
1866       bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
1867           I.getOperand(0)->getType(), F->getCallingConv(),
1868           /*IsVarArg*/ false);
1869 
1870       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1871       if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1872                                           Attribute::SExt))
1873         ExtendKind = ISD::SIGN_EXTEND;
1874       else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1875                                                Attribute::ZExt))
1876         ExtendKind = ISD::ZERO_EXTEND;
1877 
1878       LLVMContext &Context = F->getContext();
1879       bool RetInReg = F->getAttributes().hasAttribute(
1880           AttributeList::ReturnIndex, Attribute::InReg);
1881 
1882       for (unsigned j = 0; j != NumValues; ++j) {
1883         EVT VT = ValueVTs[j];
1884 
1885         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1886           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1887 
1888         CallingConv::ID CC = F->getCallingConv();
1889 
1890         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
1891         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
1892         SmallVector<SDValue, 4> Parts(NumParts);
1893         getCopyToParts(DAG, getCurSDLoc(),
1894                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1895                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
1896 
1897         // 'inreg' on function refers to return value
1898         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1899         if (RetInReg)
1900           Flags.setInReg();
1901 
1902         if (I.getOperand(0)->getType()->isPointerTy()) {
1903           Flags.setPointer();
1904           Flags.setPointerAddrSpace(
1905               cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
1906         }
1907 
1908         if (NeedsRegBlock) {
1909           Flags.setInConsecutiveRegs();
1910           if (j == NumValues - 1)
1911             Flags.setInConsecutiveRegsLast();
1912         }
1913 
1914         // Propagate extension type if any
1915         if (ExtendKind == ISD::SIGN_EXTEND)
1916           Flags.setSExt();
1917         else if (ExtendKind == ISD::ZERO_EXTEND)
1918           Flags.setZExt();
1919 
1920         for (unsigned i = 0; i < NumParts; ++i) {
1921           Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1922                                         VT, /*isfixed=*/true, 0, 0));
1923           OutVals.push_back(Parts[i]);
1924         }
1925       }
1926     }
1927   }
1928 
1929   // Push the swifterror virtual register in as the last element of Outs.
1930   // This makes sure the swifterror virtual register will be returned in the
1931   // swifterror physical register.
1932   const Function *F = I.getParent()->getParent();
1933   if (TLI.supportSwiftError() &&
1934       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1935     assert(SwiftError.getFunctionArg() && "Need a swift error argument");
1936     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1937     Flags.setSwiftError();
1938     Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1939                                   EVT(TLI.getPointerTy(DL)) /*argvt*/,
1940                                   true /*isfixed*/, 1 /*origidx*/,
1941                                   0 /*partOffs*/));
1942     // Create SDNode for the swifterror virtual register.
1943     OutVals.push_back(
1944         DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
1945                             &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
1946                         EVT(TLI.getPointerTy(DL))));
1947   }
1948 
1949   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
1950   CallingConv::ID CallConv =
1951     DAG.getMachineFunction().getFunction().getCallingConv();
1952   Chain = DAG.getTargetLoweringInfo().LowerReturn(
1953       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1954 
1955   // Verify that the target's LowerReturn behaved as expected.
1956   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1957          "LowerReturn didn't return a valid chain!");
1958 
1959   // Update the DAG with the new chain value resulting from return lowering.
1960   DAG.setRoot(Chain);
1961 }
1962 
1963 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1964 /// created for it, emit nodes to copy the value into the virtual
1965 /// registers.
1966 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1967   // Skip empty types
1968   if (V->getType()->isEmptyTy())
1969     return;
1970 
1971   DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
1972   if (VMI != FuncInfo.ValueMap.end()) {
1973     assert(!V->use_empty() && "Unused value assigned virtual registers!");
1974     CopyValueToVirtualRegister(V, VMI->second);
1975   }
1976 }
1977 
1978 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1979 /// the current basic block, add it to ValueMap now so that we'll get a
1980 /// CopyTo/FromReg.
1981 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1982   // No need to export constants.
1983   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1984 
1985   // Already exported?
1986   if (FuncInfo.isExportedInst(V)) return;
1987 
1988   unsigned Reg = FuncInfo.InitializeRegForValue(V);
1989   CopyValueToVirtualRegister(V, Reg);
1990 }
1991 
1992 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1993                                                      const BasicBlock *FromBB) {
1994   // The operands of the setcc have to be in this block.  We don't know
1995   // how to export them from some other block.
1996   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1997     // Can export from current BB.
1998     if (VI->getParent() == FromBB)
1999       return true;
2000 
2001     // Is already exported, noop.
2002     return FuncInfo.isExportedInst(V);
2003   }
2004 
2005   // If this is an argument, we can export it if the BB is the entry block or
2006   // if it is already exported.
2007   if (isa<Argument>(V)) {
2008     if (FromBB == &FromBB->getParent()->getEntryBlock())
2009       return true;
2010 
2011     // Otherwise, can only export this if it is already exported.
2012     return FuncInfo.isExportedInst(V);
2013   }
2014 
2015   // Otherwise, constants can always be exported.
2016   return true;
2017 }
2018 
2019 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2020 BranchProbability
2021 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2022                                         const MachineBasicBlock *Dst) const {
2023   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2024   const BasicBlock *SrcBB = Src->getBasicBlock();
2025   const BasicBlock *DstBB = Dst->getBasicBlock();
2026   if (!BPI) {
2027     // If BPI is not available, set the default probability as 1 / N, where N is
2028     // the number of successors.
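         // e.g. a two-way conditional branch yields 1/2 for the queried edge,
         // and a block with no successors still yields a well-defined 1/1
         // thanks to the max() below.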
2029     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2030     return BranchProbability(1, SuccSize);
2031   }
2032   return BPI->getEdgeProbability(SrcBB, DstBB);
2033 }
2034 
2035 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2036                                                MachineBasicBlock *Dst,
2037                                                BranchProbability Prob) {
2038   if (!FuncInfo.BPI)
2039     Src->addSuccessorWithoutProb(Dst);
2040   else {
2041     if (Prob.isUnknown())
2042       Prob = getEdgeProbability(Src, Dst);
2043     Src->addSuccessor(Dst, Prob);
2044   }
2045 }
2046 
2047 static bool InBlock(const Value *V, const BasicBlock *BB) {
2048   if (const Instruction *I = dyn_cast<Instruction>(V))
2049     return I->getParent() == BB;
2050   return true;
2051 }
2052 
2053 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2054 /// This function emits a branch and is used at the leaves of an OR or an
2055 /// AND operator tree.
2056 void
2057 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2058                                                   MachineBasicBlock *TBB,
2059                                                   MachineBasicBlock *FBB,
2060                                                   MachineBasicBlock *CurBB,
2061                                                   MachineBasicBlock *SwitchBB,
2062                                                   BranchProbability TProb,
2063                                                   BranchProbability FProb,
2064                                                   bool InvertCond) {
2065   const BasicBlock *BB = CurBB->getBasicBlock();
2066 
2067   // If the leaf of the tree is a comparison, merge the condition into
2068   // the caseblock.
2069   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2070     // The operands of the cmp have to be in this block.  We don't know
2071     // how to export them from some other block.  If this is the first block
2072     // of the sequence, no exporting is needed.
2073     if (CurBB == SwitchBB ||
2074         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2075          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2076       ISD::CondCode Condition;
2077       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2078         ICmpInst::Predicate Pred =
2079             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2080         Condition = getICmpCondCode(Pred);
2081       } else {
2082         const FCmpInst *FC = cast<FCmpInst>(Cond);
2083         FCmpInst::Predicate Pred =
2084             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2085         Condition = getFCmpCondCode(Pred);
2086         if (TM.Options.NoNaNsFPMath)
2087           Condition = getFCmpCodeWithoutNaN(Condition);
2088       }
2089 
2090       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2091                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2092       SL->SwitchCases.push_back(CB);
2093       return;
2094     }
2095   }
2096 
2097   // Create a CaseBlock record representing this branch.
2098   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2099   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2100                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2101   SL->SwitchCases.push_back(CB);
2102 }
2103 
2104 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2105                                                MachineBasicBlock *TBB,
2106                                                MachineBasicBlock *FBB,
2107                                                MachineBasicBlock *CurBB,
2108                                                MachineBasicBlock *SwitchBB,
2109                                                Instruction::BinaryOps Opc,
2110                                                BranchProbability TProb,
2111                                                BranchProbability FProb,
2112                                                bool InvertCond) {
2113   // Look through a 'not' (it is never itself part of the and/or tree) and
2114   // remember to invert the opcode and operands at the next level.
2115   Value *NotCond;
2116   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2117       InBlock(NotCond, CurBB->getBasicBlock())) {
2118     FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2119                          !InvertCond);
2120     return;
2121   }
2122 
2123   const Instruction *BOp = dyn_cast<Instruction>(Cond);
2124   // Compute the effective opcode for Cond, taking into account whether it needs
2125   // to be inverted, e.g.
2126   //   and (not (or A, B)), C
2127   // gets lowered as
2128   //   and (and (not A, not B), C)
2129   unsigned BOpc = 0;
2130   if (BOp) {
2131     BOpc = BOp->getOpcode();
2132     if (InvertCond) {
2133       if (BOpc == Instruction::And)
2134         BOpc = Instruction::Or;
2135       else if (BOpc == Instruction::Or)
2136         BOpc = Instruction::And;
2137     }
2138   }
2139 
2140   // If this node is not part of the or/and tree, emit it as a branch.
2141   if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
2142       BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
2143       BOp->getParent() != CurBB->getBasicBlock() ||
2144       !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
2145       !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
2146     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2147                                  TProb, FProb, InvertCond);
2148     return;
2149   }
2150 
2151   //  Create TmpBB after CurBB.
2152   MachineFunction::iterator BBI(CurBB);
2153   MachineFunction &MF = DAG.getMachineFunction();
2154   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2155   CurBB->getParent()->insert(++BBI, TmpBB);
2156 
2157   if (Opc == Instruction::Or) {
2158     // Codegen X | Y as:
2159     // BB1:
2160     //   jmp_if_X TBB
2161     //   jmp TmpBB
2162     // TmpBB:
2163     //   jmp_if_Y TBB
2164     //   jmp FBB
2165     //
2166 
2167     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2168     // The requirement is that
2169     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2170     //     = TrueProb for original BB.
2171     // Assuming the original probabilities are A and B, one choice is to set
2172     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2173     // A/(1+B) and 2B/(1+B). This choice assumes that
2174     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2175     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2176     // TmpBB, but the math is more complicated.
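         // Worked example (illustrative): with A = B = 1/2, BB1 gets
         // TrueProb = A/2 = 1/4 and FalseProb = A/2 + B = 3/4; normalization
         // then gives TmpBB A/(1+B) = 1/3 and 2B/(1+B) = 2/3. The invariant
         // holds: 1/4 + 3/4 * 1/3 = 1/2 = A.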
2177 
2178     auto NewTrueProb = TProb / 2;
2179     auto NewFalseProb = TProb / 2 + FProb;
2180     // Emit the LHS condition.
2181     FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
2182                          NewTrueProb, NewFalseProb, InvertCond);
2183 
2184     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2185     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2186     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2187     // Emit the RHS condition into TmpBB.
2188     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
2189                          Probs[0], Probs[1], InvertCond);
2190   } else {
2191     assert(Opc == Instruction::And && "Unknown merge op!");
2192     // Codegen X & Y as:
2193     // BB1:
2194     //   jmp_if_X TmpBB
2195     //   jmp FBB
2196     // TmpBB:
2197     //   jmp_if_Y TBB
2198     //   jmp FBB
2199     //
2200     //  This requires creation of TmpBB after CurBB.
2201 
2202     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2203     // The requirement is that
2204     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2205     //     = FalseProb for original BB.
2206     // Assuming the original probabilities are A and B, one choice is to set
2207     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2208     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2209     // TrueProb for BB1 * FalseProb for TmpBB.
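         // Worked example (illustrative): with A = B = 1/2, BB1 gets
         // TrueProb = A + B/2 = 3/4 and FalseProb = B/2 = 1/4; normalization
         // then gives TmpBB 2A/(1+A) = 2/3 and B/(1+A) = 1/3. The invariant
         // holds: 1/4 + 3/4 * 1/3 = 1/2 = B.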
2210 
2211     auto NewTrueProb = TProb + FProb / 2;
2212     auto NewFalseProb = FProb / 2;
2213     // Emit the LHS condition.
2214     FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
2215                          NewTrueProb, NewFalseProb, InvertCond);
2216 
2217     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2218     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2219     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2220     // Emit the RHS condition into TmpBB.
2221     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
2222                          Probs[0], Probs[1], InvertCond);
2223   }
2224 }
2225 
2226 /// If the set of cases should be emitted as a series of branches, return true.
2227 /// If we should emit this as a bunch of and/or'd together conditions, return
2228 /// false.
2229 bool
2230 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2231   if (Cases.size() != 2) return true;
2232 
2233   // If this is two comparisons of the same values or'd or and'd together, they
2234   // will get folded into a single comparison, so don't emit two blocks.
2235   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2236        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2237       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2238        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2239     return false;
2240   }
2241 
2242   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2243   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2244   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2245       Cases[0].CC == Cases[1].CC &&
2246       isa<Constant>(Cases[0].CmpRHS) &&
2247       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2248     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2249       return false;
2250     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2251       return false;
2252   }
2253 
2254   return true;
2255 }
2256 
2257 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2258   MachineBasicBlock *BrMBB = FuncInfo.MBB;
2259 
2260   // Update machine-CFG edges.
2261   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2262 
2263   if (I.isUnconditional()) {
2264     // Update machine-CFG edges.
2265     BrMBB->addSuccessor(Succ0MBB);
2266 
2267     // If this is not a fall-through branch or optimizations are switched off,
2268     // emit the branch.
2269     if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
2270       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2271                               MVT::Other, getControlRoot(),
2272                               DAG.getBasicBlock(Succ0MBB)));
2273 
2274     return;
2275   }
2276 
2277   // If this condition is one of the special cases we handle, do special stuff
2278   // now.
2279   const Value *CondVal = I.getCondition();
2280   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2281 
2282   // If this is a series of conditions that are or'd or and'd together, emit
2283   // this as a sequence of branches instead of setcc's with and/or operations.
2284   // As long as jumps are not expensive, this should improve performance.
2285   // For example, instead of something like:
2286   //     cmp A, B
2287   //     C = seteq
2288   //     cmp D, E
2289   //     F = setle
2290   //     or C, F
2291   //     jnz foo
2292   // Emit:
2293   //     cmp A, B
2294   //     je foo
2295   //     cmp D, E
2296   //     jle foo
2297   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
2298     Instruction::BinaryOps Opcode = BOp->getOpcode();
2299     if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
2300         !I.hasMetadata(LLVMContext::MD_unpredictable) &&
2301         (Opcode == Instruction::And || Opcode == Instruction::Or)) {
2302       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
2303                            Opcode,
2304                            getEdgeProbability(BrMBB, Succ0MBB),
2305                            getEdgeProbability(BrMBB, Succ1MBB),
2306                            /*InvertCond=*/false);
2307       // If the compares in later blocks need to use values not currently
2308       // exported from this block, export them now.  This block should always
2309       // be the first entry.
2310       assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2311 
2312       // Allow some cases to be rejected.
2313       if (ShouldEmitAsBranches(SL->SwitchCases)) {
2314         for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2315           ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2316           ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2317         }
2318 
2319         // Emit the branch for this block.
2320         visitSwitchCase(SL->SwitchCases[0], BrMBB);
2321         SL->SwitchCases.erase(SL->SwitchCases.begin());
2322         return;
2323       }
2324 
2325       // Okay, we decided not to do this, remove any inserted MBB's and clear
2326       // SwitchCases.
2327       for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2328         FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2329 
2330       SL->SwitchCases.clear();
2331     }
2332   }
2333 
2334   // Create a CaseBlock record representing this branch.
2335   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2336                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2337 
2338   // Use visitSwitchCase to actually insert the fast branch sequence for this
2339   // cond branch.
2340   visitSwitchCase(CB, BrMBB);
2341 }
2342 
2343 /// visitSwitchCase - Emits the necessary code to represent a single node in
2344 /// the binary search tree resulting from lowering a switch instruction.
2345 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2346                                           MachineBasicBlock *SwitchBB) {
2347   SDValue Cond;
2348   SDValue CondLHS = getValue(CB.CmpLHS);
2349   SDLoc dl = CB.DL;
2350 
2351   if (CB.CC == ISD::SETTRUE) {
2352     // Branch or fall through to TrueBB.
2353     addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2354     SwitchBB->normalizeSuccProbs();
2355     if (CB.TrueBB != NextBlock(SwitchBB)) {
2356       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2357                               DAG.getBasicBlock(CB.TrueBB)));
2358     }
2359     return;
2360   }
2361 
2362   auto &TLI = DAG.getTargetLoweringInfo();
2363   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2364 
2365   // Build the setcc now.
2366   if (!CB.CmpMHS) {
2367     // Fold "(X == true)" to X and "(X == false)" to !X to
2368     // handle common cases produced by branch lowering.
2369     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2370         CB.CC == ISD::SETEQ)
2371       Cond = CondLHS;
2372     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2373              CB.CC == ISD::SETEQ) {
2374       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2375       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2376     } else {
2377       SDValue CondRHS = getValue(CB.CmpRHS);
2378 
2379       // If a pointer's DAG type is larger than its memory type then the DAG
2380       // values are zero-extended. This breaks signed comparisons so truncate
2381       // back to the underlying type before doing the compare.
2382       if (CondLHS.getValueType() != MemVT) {
2383         CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2384         CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2385       }
2386       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2387     }
2388   } else {
2389     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
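         // A range case "Low <= X <= High" is emitted as the single unsigned
         // comparison "(X - Low) <=u (High - Low)"; e.g. the range [3, 7]
         // becomes (X - 3) <=u 4. The subtraction is skipped below when Low is
         // the signed minimum for the type.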
2390 
2391     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2392     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2393 
2394     SDValue CmpOp = getValue(CB.CmpMHS);
2395     EVT VT = CmpOp.getValueType();
2396 
2397     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2398       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2399                           ISD::SETLE);
2400     } else {
2401       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2402                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2403       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2404                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2405     }
2406   }
2407 
2408   // Update successor info
2409   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2410   // TrueBB and FalseBB are always different unless the incoming IR is
2411   // degenerate. This only happens when running llc on weird IR.
2412   if (CB.TrueBB != CB.FalseBB)
2413     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2414   SwitchBB->normalizeSuccProbs();
2415 
2416   // If the lhs block is the next block, invert the condition so that we can
2417   // fall through to the lhs instead of the rhs block.
2418   if (CB.TrueBB == NextBlock(SwitchBB)) {
2419     std::swap(CB.TrueBB, CB.FalseBB);
2420     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2421     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2422   }
2423 
2424   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2425                                MVT::Other, getControlRoot(), Cond,
2426                                DAG.getBasicBlock(CB.TrueBB));
2427 
2428   // Insert the false branch. Do this even if it's a fall through branch,
2429   // this makes it easier to do DAG optimizations which require inverting
2430   // the branch condition.
2431   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2432                        DAG.getBasicBlock(CB.FalseBB));
2433 
2434   DAG.setRoot(BrCond);
2435 }
2436 
2437 /// visitJumpTable - Emit JumpTable node in the current MBB
2438 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2439   // Emit the code for the jump table
2440   assert(JT.Reg != -1U && "Should lower JT Header first!");
2441   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2442   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2443                                      JT.Reg, PTy);
2444   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2445   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2446                                     MVT::Other, Index.getValue(1),
2447                                     Table, Index);
2448   DAG.setRoot(BrJumpTable);
2449 }
2450 
2451 /// visitJumpTableHeader - This function emits the necessary code to produce an
2452 /// index into the JumpTable from the value being switched on.
2453 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2454                                                JumpTableHeader &JTH,
2455                                                MachineBasicBlock *SwitchBB) {
2456   SDLoc dl = getCurSDLoc();
2457 
2458   // Subtract the lowest switch case value from the value being switched on.
2459   SDValue SwitchOp = getValue(JTH.SValue);
2460   EVT VT = SwitchOp.getValueType();
2461   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2462                             DAG.getConstant(JTH.First, dl, VT));
2463 
2464   // The SDNode we just created, which holds the value being switched on minus
2465   // the smallest case value, needs to be copied to a virtual register so it
2466   // can be used as an index into the jump table in a subsequent basic block.
2467   // This value may be smaller or larger than the target's pointer type, and
2468   // may therefore require extension or truncation.
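       // For example (illustrative): with cases 10..14, Sub = SwitchOp - 10,
       // and the range check below (when not omitted) compares Sub against 4
       // (Last - First) with SETUGT to reach the default block when out of
       // range.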
2469   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2470   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2471 
2472   unsigned JumpTableReg =
2473       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2474   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2475                                     JumpTableReg, SwitchOp);
2476   JT.Reg = JumpTableReg;
2477 
2478   if (!JTH.OmitRangeCheck) {
2479     // Emit the range check for the jump table, and branch to the default block
2480     // for the switch statement if the value being switched on exceeds the
2481     // largest case in the switch.
2482     SDValue CMP = DAG.getSetCC(
2483         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2484                                    Sub.getValueType()),
2485         Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2486 
2487     SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2488                                  MVT::Other, CopyTo, CMP,
2489                                  DAG.getBasicBlock(JT.Default));
2490 
2491     // Avoid emitting unnecessary branches to the next block.
2492     if (JT.MBB != NextBlock(SwitchBB))
2493       BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2494                            DAG.getBasicBlock(JT.MBB));
2495 
2496     DAG.setRoot(BrCond);
2497   } else {
2498     // Avoid emitting unnecessary branches to the next block.
2499     if (JT.MBB != NextBlock(SwitchBB))
2500       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2501                               DAG.getBasicBlock(JT.MBB)));
2502     else
2503       DAG.setRoot(CopyTo);
2504   }
2505 }
2506 
2507 /// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
2508 /// variable, if one exists.
2509 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2510                                  SDValue &Chain) {
2511   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2512   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2513   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2514   MachineFunction &MF = DAG.getMachineFunction();
2515   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2516   MachineSDNode *Node =
2517       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2518   if (Global) {
2519     MachinePointerInfo MPInfo(Global);
2520     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2521                  MachineMemOperand::MODereferenceable;
2522     MachineMemOperand *MemRef = MF.getMachineMemOperand(
2523         MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
2524     DAG.setNodeMemRefs(Node, {MemRef});
2525   }
2526   if (PtrTy != PtrMemTy)
2527     return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2528   return SDValue(Node, 0);
2529 }
2530 
2531 /// Codegen a new tail for a stack protector check ParentMBB which has had its
2532 /// tail spliced into a stack protector check success bb.
2533 ///
2534 /// For a high level explanation of how this fits into the stack protector
2535 /// generation see the comment on the declaration of class
2536 /// StackProtectorDescriptor.
2537 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2538                                                   MachineBasicBlock *ParentBB) {
2540   // First create the loads to the guard/stack slot for the comparison.
2541   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2542   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2543   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2544 
2545   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2546   int FI = MFI.getStackProtectorIndex();
2547 
2548   SDValue Guard;
2549   SDLoc dl = getCurSDLoc();
2550   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2551   const Module &M = *ParentBB->getParent()->getFunction().getParent();
2552   unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
2553 
2554   // Generate code to load the content of the guard slot.
2555   SDValue GuardVal = DAG.getLoad(
2556       PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2557       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2558       MachineMemOperand::MOVolatile);
2559 
2560   if (TLI.useStackGuardXorFP())
2561     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2562 
2563   // Retrieve guard check function, nullptr if instrumentation is inlined.
2564   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2565     // The target provides a guard check function to validate the guard value.
2566     // Generate a call to that function with the content of the guard slot as
2567     // argument.
2568     FunctionType *FnTy = GuardCheckFn->getFunctionType();
2569     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2570 
2571     TargetLowering::ArgListTy Args;
2572     TargetLowering::ArgListEntry Entry;
2573     Entry.Node = GuardVal;
2574     Entry.Ty = FnTy->getParamType(0);
2575     if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
2576       Entry.IsInReg = true;
2577     Args.push_back(Entry);
2578 
2579     TargetLowering::CallLoweringInfo CLI(DAG);
2580     CLI.setDebugLoc(getCurSDLoc())
2581         .setChain(DAG.getEntryNode())
2582         .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2583                    getValue(GuardCheckFn), std::move(Args));
2584 
2585     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2586     DAG.setRoot(Result.second);
2587     return;
2588   }
2589 
2590   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2591   // Otherwise, emit a volatile load to retrieve the stack guard value.
2592   SDValue Chain = DAG.getEntryNode();
2593   if (TLI.useLoadStackGuardNode()) {
2594     Guard = getLoadStackGuard(DAG, dl, Chain);
2595   } else {
2596     const Value *IRGuard = TLI.getSDagStackGuard(M);
2597     SDValue GuardPtr = getValue(IRGuard);
2598 
2599     Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2600                         MachinePointerInfo(IRGuard, 0), Align,
2601                         MachineMemOperand::MOVolatile);
2602   }
2603 
2604   // Perform the comparison via a setcc node.
2605   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2606                                                         *DAG.getContext(),
2607                                                         Guard.getValueType()),
2608                              Guard, GuardVal, ISD::SETNE);
2609 
2610   // If the guard and stack slot values are not equal, branch to failure MBB.
2611   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2612                                MVT::Other, GuardVal.getOperand(0),
2613                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2614   // Otherwise, branch to the success MBB.
2615   SDValue Br = DAG.getNode(ISD::BR, dl,
2616                            MVT::Other, BrCond,
2617                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2618 
2619   DAG.setRoot(Br);
2620 }
2621 
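// A rough picture of the parent-block tail built above, when the target does
// not provide a guard check function:
//   GuardVal = volatile load of the stack protector stack slot
//   Guard    = LOAD_STACK_GUARD (or a volatile load of the guard variable)
//   cmp      = setcc ne Guard, GuardVal
//   brcond cmp, %failureMBB
//   br %successMBB
// When a guard check function is available, the tail is instead a single call
// to that function with GuardVal as its only argument.
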
2622 /// Codegen the failure basic block for a stack protector check.
2623 ///
2624 /// A failure stack protector machine basic block consists simply of a call to
2625 /// __stack_chk_fail().
2626 ///
2627 /// For a high level explanation of how this fits into the stack protector
2628 /// generation see the comment on the declaration of class
2629 /// StackProtectorDescriptor.
2630 void
2631 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2632   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2633   TargetLowering::MakeLibCallOptions CallOptions;
2634   CallOptions.setDiscardResult(true);
2635   SDValue Chain =
2636       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2637                       None, CallOptions, getCurSDLoc()).second;
2638   // On PS4, the "return address" must still be within the calling function,
2639   // even if it's at the very end, so emit an explicit TRAP here.
2640   // Passing 'true' for doesNotReturn above won't generate the trap for us.
2641   if (TM.getTargetTriple().isPS4CPU())
2642     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2643 
2644   DAG.setRoot(Chain);
2645 }
2646 
2647 /// visitBitTestHeader - This function emits the necessary code to produce a
2648 /// value suitable for "bit tests".
2649 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2650                                              MachineBasicBlock *SwitchBB) {
2651   SDLoc dl = getCurSDLoc();
2652 
2653   // Subtract the minimum value.
2654   SDValue SwitchOp = getValue(B.SValue);
2655   EVT VT = SwitchOp.getValueType();
2656   SDValue RangeSub =
2657       DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
2658 
2659   // Determine the type of the test operands.
2660   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2661   bool UsePtrType = false;
2662   if (!TLI.isTypeLegal(VT)) {
2663     UsePtrType = true;
2664   } else {
2665     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2666       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2667         // Switch table case ranges are encoded into a series of masks.
2668         // Just use pointer type, it's guaranteed to fit.
2669         UsePtrType = true;
2670         break;
2671       }
2672   }
2673   SDValue Sub = RangeSub;
2674   if (UsePtrType) {
2675     VT = TLI.getPointerTy(DAG.getDataLayout());
2676     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2677   }
2678 
2679   B.RegVT = VT.getSimpleVT();
2680   B.Reg = FuncInfo.CreateReg(B.RegVT);
2681   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2682 
2683   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2684 
2685   if (!B.OmitRangeCheck)
2686     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2687   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2688   SwitchBB->normalizeSuccProbs();
2689 
2690   SDValue Root = CopyTo;
2691   if (!B.OmitRangeCheck) {
2692     // Conditional branch to the default block.
2693     SDValue RangeCmp = DAG.getSetCC(dl,
2694         TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2695                                RangeSub.getValueType()),
2696         RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
2697         ISD::SETUGT);
2698 
2699     Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
2700                        DAG.getBasicBlock(B.Default));
2701   }
2702 
2703   // Avoid emitting unnecessary branches to the next block.
2704   if (MBB != NextBlock(SwitchBB))
2705     Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
2706 
2707   DAG.setRoot(Root);
2708 }
2709 
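// For illustration: a switch whose case values are sparse but close together
// can reach this header as a single bit-test cluster. The code above emits,
// schematically:
//   t0 = sub %x, B.First         ; rebase the switch value
//   CopyToReg t0 -> B.Reg        ; shift amount for the per-case bit tests
//   t1 = setcc ugt t0, B.Range   ; range check, unless B.OmitRangeCheck
//   brcond t1, %default
// and visitBitTestCase below then tests each case mask against B.Reg.
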
2710 /// visitBitTestCase - This function produces one "bit test".
2711 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2712                                            MachineBasicBlock* NextMBB,
2713                                            BranchProbability BranchProbToNext,
2714                                            unsigned Reg,
2715                                            BitTestCase &B,
2716                                            MachineBasicBlock *SwitchBB) {
2717   SDLoc dl = getCurSDLoc();
2718   MVT VT = BB.RegVT;
2719   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2720   SDValue Cmp;
2721   unsigned PopCount = countPopulation(B.Mask);
2722   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2723   if (PopCount == 1) {
2724     // Testing for a single bit; just compare the shift count with what it
2725     // would need to be to shift a 1 bit in that position.
2726     Cmp = DAG.getSetCC(
2727         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2728         ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2729         ISD::SETEQ);
2730   } else if (PopCount == BB.Range) {
2731     // There is only one zero bit in the range, test for it directly.
2732     Cmp = DAG.getSetCC(
2733         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2734         ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2735         ISD::SETNE);
2736   } else {
2737     // Make desired shift
2738     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2739                                     DAG.getConstant(1, dl, VT), ShiftOp);
2740 
2741     // Emit bit tests and jumps
2742     SDValue AndOp = DAG.getNode(ISD::AND, dl,
2743                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2744     Cmp = DAG.getSetCC(
2745         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2746         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2747   }
2748 
2749   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2750   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2751   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2752   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2753   // B.ExtraProb and BranchProbToNext are relative probabilities (they behave
2754   // more like weights), so their sum is not guaranteed to be one; normalize
2755   // them so that their sum becomes one.
2756   SwitchBB->normalizeSuccProbs();
2757 
2758   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2759                               MVT::Other, getControlRoot(),
2760                               Cmp, DAG.getBasicBlock(B.TargetBB));
2761 
2762   // Avoid emitting unnecessary branches to the next block.
2763   if (NextMBB != NextBlock(SwitchBB))
2764     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2765                         DAG.getBasicBlock(NextMBB));
2766 
2767   DAG.setRoot(BrAnd);
2768 }
2769 
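// The compare forms above, sketched for a bit-test register value t0:
//   single-bit mask 0b00100:   cmp = setcc eq t0, 2   ; shift count == bit pos
//   general mask    0b100110:  t1  = shl 1, t0
//                              t2  = and t1, 0b100110
//                              cmp = setcc ne t2, 0
// (the single-zero-bit case is analogous, using SETNE against the position of
// the zero bit), followed by 'brcond cmp, %B.TargetBB' and a fallthrough or
// BR to NextMBB.
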
2770 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2771   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2772 
2773   // Retrieve the successors, looking through artificial IR-level blocks such
2774   // as catchswitch.
2775   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2776   const BasicBlock *EHPadBB = I.getSuccessor(1);
2777 
2778   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2779   // have to do anything here to lower funclet bundles.
2780   assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt,
2781                                         LLVMContext::OB_gc_transition,
2782                                         LLVMContext::OB_funclet,
2783                                         LLVMContext::OB_cfguardtarget}) &&
2784          "Cannot lower invokes with arbitrary operand bundles yet!");
2785 
2786   const Value *Callee(I.getCalledOperand());
2787   const Function *Fn = dyn_cast<Function>(Callee);
2788   if (isa<InlineAsm>(Callee))
2789     visitInlineAsm(I);
2790   else if (Fn && Fn->isIntrinsic()) {
2791     switch (Fn->getIntrinsicID()) {
2792     default:
2793       llvm_unreachable("Cannot invoke this intrinsic");
2794     case Intrinsic::donothing:
2795       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2796       break;
2797     case Intrinsic::experimental_patchpoint_void:
2798     case Intrinsic::experimental_patchpoint_i64:
2799       visitPatchpoint(I, EHPadBB);
2800       break;
2801     case Intrinsic::experimental_gc_statepoint:
2802       LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
2803       break;
2804     case Intrinsic::wasm_rethrow_in_catch: {
2805       // This is usually done in visitTargetIntrinsic, but this intrinsic is
2806       // special because it can be invoked, so we manually lower it to a DAG
2807       // node here.
2808       SmallVector<SDValue, 8> Ops;
2809       Ops.push_back(getRoot()); // inchain
2810       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2811       Ops.push_back(
2812           DAG.getTargetConstant(Intrinsic::wasm_rethrow_in_catch, getCurSDLoc(),
2813                                 TLI.getPointerTy(DAG.getDataLayout())));
2814       SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
2815       DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
2816       break;
2817     }
2818     }
2819   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2820     // Currently we do not lower any intrinsic calls with deopt operand bundles.
2821     // Eventually we will support lowering the @llvm.experimental.deoptimize
2822     // intrinsic, and right now there are no plans to support other intrinsics
2823     // with deopt state.
2824     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2825   } else {
2826     LowerCallTo(I, getValue(Callee), false, EHPadBB);
2827   }
2828 
2829   // If the value of the invoke is used outside of its defining block, make it
2830   // available as a virtual register.
2831   // We already took care of the exported value for the statepoint instruction
2832   // during the call to LowerStatepoint.
2833   if (!isa<GCStatepointInst>(I)) {
2834     CopyToExportRegsIfNeeded(&I);
2835   }
2836 
2837   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2838   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2839   BranchProbability EHPadBBProb =
2840       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2841           : BranchProbability::getZero();
2842   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2843 
2844   // Update successor info.
2845   addSuccessorWithProb(InvokeMBB, Return);
2846   for (auto &UnwindDest : UnwindDests) {
2847     UnwindDest.first->setIsEHPad();
2848     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2849   }
2850   InvokeMBB->normalizeSuccProbs();
2851 
2852   // Drop into normal successor.
2853   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
2854                           DAG.getBasicBlock(Return)));
2855 }
2856 
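// Schematically, for 'invoke void @f() to label %ok unwind label %lpad' the
// call itself is lowered via LowerCallTo, InvokeMBB receives machine-CFG
// successors {%ok} plus the unwind destination(s) reached through %lpad, and
// the DAG root becomes an unconditional BR to %ok; the exceptional edge is
// represented only in the CFG, not in the DAG.
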
2857 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
2858   MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
2859 
2860   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2861   // have to do anything here to lower funclet bundles.
2862   assert(!I.hasOperandBundlesOtherThan(
2863              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2864          "Cannot lower callbrs with arbitrary operand bundles yet!");
2865 
2866   assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
2867   visitInlineAsm(I);
2868   CopyToExportRegsIfNeeded(&I);
2869 
2870   // Retrieve successors.
2871   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
2872   Return->setInlineAsmBrDefaultTarget();
2873 
2874   // Update successor info.
2875   addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
2876   for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
2877     MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)];
2878     addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
2879     CallBrMBB->addInlineAsmBrIndirectTarget(Target);
2880   }
2881   CallBrMBB->normalizeSuccProbs();
2882 
2883   // Drop into default successor.
2884   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2885                           MVT::Other, getControlRoot(),
2886                           DAG.getBasicBlock(Return)));
2887 }
2888 
2889 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2890   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2891 }
2892 
2893 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2894   assert(FuncInfo.MBB->isEHPad() &&
2895          "Call to landingpad not in landing pad!");
2896 
2897   // If there aren't registers to copy the values into (e.g., during SjLj
2898   // exceptions), then don't bother to create these DAG nodes.
2899   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2900   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2901   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2902       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2903     return;
2904 
2905   // If the landingpad's return type is a token type, we don't create DAG
2906   // nodes for its exception pointer and selector value. Extracting the
2907   // exception pointer or selector value from a token-typed landingpad is not
2908   // currently supported.
2909   if (LP.getType()->isTokenTy())
2910     return;
2911 
2912   SmallVector<EVT, 2> ValueVTs;
2913   SDLoc dl = getCurSDLoc();
2914   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2915   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2916 
2917   // Get the two live-in registers as SDValues. The physregs have already been
2918   // copied into virtual registers.
2919   SDValue Ops[2];
2920   if (FuncInfo.ExceptionPointerVirtReg) {
2921     Ops[0] = DAG.getZExtOrTrunc(
2922         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2923                            FuncInfo.ExceptionPointerVirtReg,
2924                            TLI.getPointerTy(DAG.getDataLayout())),
2925         dl, ValueVTs[0]);
2926   } else {
2927     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
2928   }
2929   Ops[1] = DAG.getZExtOrTrunc(
2930       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2931                          FuncInfo.ExceptionSelectorVirtReg,
2932                          TLI.getPointerTy(DAG.getDataLayout())),
2933       dl, ValueVTs[1]);
2934 
2935   // Merge into one.
2936   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2937                             DAG.getVTList(ValueVTs), Ops);
2938   setValue(&LP, Res);
2939 }
2940 
2941 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2942                                            MachineBasicBlock *Last) {
2943   // Update JTCases.
2944   for (unsigned i = 0, e = SL->JTCases.size(); i != e; ++i)
2945     if (SL->JTCases[i].first.HeaderBB == First)
2946       SL->JTCases[i].first.HeaderBB = Last;
2947 
2948   // Update BitTestCases.
2949   for (unsigned i = 0, e = SL->BitTestCases.size(); i != e; ++i)
2950     if (SL->BitTestCases[i].Parent == First)
2951       SL->BitTestCases[i].Parent = Last;
2952 
2953   // SelectionDAGISel::FinishBasicBlock will add PHI operands for the
2954   // successors of the fallthrough block. Here, we add PHI operands for the
2955   // successors of the INLINEASM_BR block itself.
2956   if (First->getFirstTerminator()->getOpcode() == TargetOpcode::INLINEASM_BR)
2957     for (std::pair<MachineInstr *, unsigned> &pair : FuncInfo.PHINodesToUpdate)
2958       if (First->isSuccessor(pair.first->getParent()))
2959         MachineInstrBuilder(*First->getParent(), pair.first)
2960             .addReg(pair.second)
2961             .addMBB(First);
2962 }
2963 
2964 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2965   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2966 
2967   // Update machine-CFG edges with unique successors.
2968   SmallSet<BasicBlock*, 32> Done;
2969   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2970     BasicBlock *BB = I.getSuccessor(i);
2971     bool Inserted = Done.insert(BB).second;
2972     if (!Inserted)
2973       continue;
2974 
2975     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2976     addSuccessorWithProb(IndirectBrMBB, Succ);
2977   }
2978   IndirectBrMBB->normalizeSuccProbs();
2979 
2980   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2981                           MVT::Other, getControlRoot(),
2982                           getValue(I.getAddress())));
2983 }
2984 
2985 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2986   if (!DAG.getTarget().Options.TrapUnreachable)
2987     return;
2988 
2989   // We may be able to ignore an unreachable behind a noreturn call.
2990   if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
2991     const BasicBlock &BB = *I.getParent();
2992     if (&I != &BB.front()) {
2993       BasicBlock::const_iterator PredI =
2994         std::prev(BasicBlock::const_iterator(&I));
2995       if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2996         if (Call->doesNotReturn())
2997           return;
2998       }
2999     }
3000   }
3001 
3002   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3003 }
3004 
3005 void SelectionDAGBuilder::visitFSub(const User &I) {
3006   // -0.0 - X --> fneg
3007   Type *Ty = I.getType();
3008   if (isa<Constant>(I.getOperand(0)) &&
3009       I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
3010     SDValue Op2 = getValue(I.getOperand(1));
3011     setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
3012                              Op2.getValueType(), Op2));
3013     return;
3014   }
3015 
3016   visitBinary(I, ISD::FSUB);
3017 }
3018 
3019 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3020   SDNodeFlags Flags;
3021 
3022   SDValue Op = getValue(I.getOperand(0));
3023   SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3024                                     Op, Flags);
3025   setValue(&I, UnNodeValue);
3026 }
3027 
3028 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3029   SDNodeFlags Flags;
3030   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3031     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3032     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3033   }
3034   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
3035     Flags.setExact(ExactOp->isExact());
3036   }
3037 
3038   SDValue Op1 = getValue(I.getOperand(0));
3039   SDValue Op2 = getValue(I.getOperand(1));
3040   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3041                                      Op1, Op2, Flags);
3042   setValue(&I, BinNodeValue);
3043 }
3044 
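// For example, 'add nuw nsw i32 %a, %b' arrives here with Opcode == ISD::ADD
// and yields an ADD node whose SDNodeFlags carry both no-wrap flags, and
// 'udiv exact i32 %a, %b' yields a UDIV node with the exact flag set.
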
3045 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3046   SDValue Op1 = getValue(I.getOperand(0));
3047   SDValue Op2 = getValue(I.getOperand(1));
3048 
3049   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3050       Op1.getValueType(), DAG.getDataLayout());
3051 
3052   // Coerce the shift amount to the right type if we can.
3053   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3054     unsigned ShiftSize = ShiftTy.getSizeInBits();
3055     unsigned Op2Size = Op2.getValueSizeInBits();
3056     SDLoc DL = getCurSDLoc();
3057 
3058     // If the operand is smaller than the shift count type, promote it.
3059     if (ShiftSize > Op2Size)
3060       Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
3061 
3062     // If the operand is larger than the shift count type but the shift
3063     // count type has enough bits to represent any shift value, truncate
3064     // it now. This is a common case and it exposes the truncate to
3065     // optimization early.
3066     else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
3067       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
3068     // Otherwise we'll need to temporarily settle for some other convenient
3069     // type.  Type legalization will make adjustments once the shiftee is split.
3070     else
3071       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
3072   }
3073 
3074   bool nuw = false;
3075   bool nsw = false;
3076   bool exact = false;
3077 
3078   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3079 
3080     if (const OverflowingBinaryOperator *OFBinOp =
3081             dyn_cast<const OverflowingBinaryOperator>(&I)) {
3082       nuw = OFBinOp->hasNoUnsignedWrap();
3083       nsw = OFBinOp->hasNoSignedWrap();
3084     }
3085     if (const PossiblyExactOperator *ExactOp =
3086             dyn_cast<const PossiblyExactOperator>(&I))
3087       exact = ExactOp->isExact();
3088   }
3089   SDNodeFlags Flags;
3090   Flags.setExact(exact);
3091   Flags.setNoSignedWrap(nsw);
3092   Flags.setNoUnsignedWrap(nuw);
3093   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3094                             Flags);
3095   setValue(&I, Res);
3096 }
3097 
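// A sketch of the shift-amount coercion above: on a target whose shift amount
// type for i64 is i8, 'shl i64 %v, i64 %n' arrives with Op2 as an i64 value;
// since i8 can hold Log2_32_Ceil(64) = 6 bits, %n is truncated to i8 early,
// while a shift amount narrower than the shift type would be zero-extended.
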
3098 void SelectionDAGBuilder::visitSDiv(const User &I) {
3099   SDValue Op1 = getValue(I.getOperand(0));
3100   SDValue Op2 = getValue(I.getOperand(1));
3101 
3102   SDNodeFlags Flags;
3103   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3104                  cast<PossiblyExactOperator>(&I)->isExact());
3105   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3106                            Op2, Flags));
3107 }
3108 
3109 void SelectionDAGBuilder::visitICmp(const User &I) {
3110   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3111   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3112     predicate = IC->getPredicate();
3113   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3114     predicate = ICmpInst::Predicate(IC->getPredicate());
3115   SDValue Op1 = getValue(I.getOperand(0));
3116   SDValue Op2 = getValue(I.getOperand(1));
3117   ISD::CondCode Opcode = getICmpCondCode(predicate);
3118 
3119   auto &TLI = DAG.getTargetLoweringInfo();
3120   EVT MemVT =
3121       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3122 
3123   // If a pointer's DAG type is larger than its memory type then the DAG values
3124   // are zero-extended. This breaks signed comparisons so truncate back to the
3125   // underlying type before doing the compare.
3126   if (Op1.getValueType() != MemVT) {
3127     Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3128     Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3129   }
3130 
3131   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3132                                                         I.getType());
3133   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3134 }
3135 
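// For example, on a target where pointers are 64 bits wide in the DAG but
// only 32 bits wide in memory, the DAG values are zero-extended, so a compare
// such as 'icmp slt i8* %p, %q' is performed on the operands truncated back
// to the 32-bit memory type, as done above.
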
3136 void SelectionDAGBuilder::visitFCmp(const User &I) {
3137   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3138   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3139     predicate = FC->getPredicate();
3140   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3141     predicate = FCmpInst::Predicate(FC->getPredicate());
3142   SDValue Op1 = getValue(I.getOperand(0));
3143   SDValue Op2 = getValue(I.getOperand(1));
3144 
3145   ISD::CondCode Condition = getFCmpCondCode(predicate);
3146   auto *FPMO = dyn_cast<FPMathOperator>(&I);
3147   if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
3148     Condition = getFCmpCodeWithoutNaN(Condition);
3149 
3150   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3151                                                         I.getType());
3152   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3153 }
3154 
3155 // Check that the condition of the select is used only by select
3156 // instructions.
3157 static bool hasOnlySelectUsers(const Value *Cond) {
3158   return llvm::all_of(Cond->users(), [](const Value *V) {
3159     return isa<SelectInst>(V);
3160   });
3161 }
3162 
3163 void SelectionDAGBuilder::visitSelect(const User &I) {
3164   SmallVector<EVT, 4> ValueVTs;
3165   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3166                   ValueVTs);
3167   unsigned NumValues = ValueVTs.size();
3168   if (NumValues == 0) return;
3169 
3170   SmallVector<SDValue, 4> Values(NumValues);
3171   SDValue Cond     = getValue(I.getOperand(0));
3172   SDValue LHSVal   = getValue(I.getOperand(1));
3173   SDValue RHSVal   = getValue(I.getOperand(2));
3174   SmallVector<SDValue, 1> BaseOps(1, Cond);
3175   ISD::NodeType OpCode =
3176       Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3177 
3178   bool IsUnaryAbs = false;
3179 
3180   // Min/max matching is only viable if all output VTs are the same.
3181   if (is_splat(ValueVTs)) {
3182     EVT VT = ValueVTs[0];
3183     LLVMContext &Ctx = *DAG.getContext();
3184     auto &TLI = DAG.getTargetLoweringInfo();
3185 
3186     // We care about the legality of the operation after it has been type
3187     // legalized.
3188     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3189       VT = TLI.getTypeToTransformTo(Ctx, VT);
3190 
3191     // If the vselect is legal, assume we want to leave this as a vector setcc +
3192     // vselect. Otherwise, if this is going to be scalarized, we want to see if
3193     // min/max is legal on the scalar type.
3194     bool UseScalarMinMax = VT.isVector() &&
3195       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3196 
3197     Value *LHS, *RHS;
3198     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3199     ISD::NodeType Opc = ISD::DELETED_NODE;
3200     switch (SPR.Flavor) {
3201     case SPF_UMAX:    Opc = ISD::UMAX; break;
3202     case SPF_UMIN:    Opc = ISD::UMIN; break;
3203     case SPF_SMAX:    Opc = ISD::SMAX; break;
3204     case SPF_SMIN:    Opc = ISD::SMIN; break;
3205     case SPF_FMINNUM:
3206       switch (SPR.NaNBehavior) {
3207       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3208       case SPNB_RETURNS_NAN:   Opc = ISD::FMINIMUM; break;
3209       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3210       case SPNB_RETURNS_ANY: {
3211         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
3212           Opc = ISD::FMINNUM;
3213         else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT))
3214           Opc = ISD::FMINIMUM;
3215         else if (UseScalarMinMax)
3216           Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
3217             ISD::FMINNUM : ISD::FMINIMUM;
3218         break;
3219       }
3220       }
3221       break;
3222     case SPF_FMAXNUM:
3223       switch (SPR.NaNBehavior) {
3224       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3225       case SPNB_RETURNS_NAN:   Opc = ISD::FMAXIMUM; break;
3226       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3227       case SPNB_RETURNS_ANY:
3229         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
3230           Opc = ISD::FMAXNUM;
3231         else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT))
3232           Opc = ISD::FMAXIMUM;
3233         else if (UseScalarMinMax)
3234           Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
3235             ISD::FMAXNUM : ISD::FMAXIMUM;
3236         break;
3237       }
3238       break;
3239     case SPF_ABS:
3240       IsUnaryAbs = true;
3241       Opc = ISD::ABS;
3242       break;
3243     case SPF_NABS:
3244       // TODO: we need to produce sub(0, abs(X)).
3245     default: break;
3246     }
3247 
3248     if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3249         (TLI.isOperationLegalOrCustom(Opc, VT) ||
3250          (UseScalarMinMax &&
3251           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3252         // If the underlying comparison instruction is used by any other
3253         // instruction, the consumed instructions won't be destroyed, so it is
3254         // not profitable to convert to a min/max.
3255         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3256       OpCode = Opc;
3257       LHSVal = getValue(LHS);
3258       RHSVal = getValue(RHS);
3259       BaseOps.clear();
3260     }
3261 
3262     if (IsUnaryAbs) {
3263       OpCode = Opc;
3264       LHSVal = getValue(LHS);
3265       BaseOps.clear();
3266     }
3267   }
3268 
3269   if (IsUnaryAbs) {
3270     for (unsigned i = 0; i != NumValues; ++i) {
3271       Values[i] =
3272           DAG.getNode(OpCode, getCurSDLoc(),
3273                       LHSVal.getNode()->getValueType(LHSVal.getResNo() + i),
3274                       SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3275     }
3276   } else {
3277     for (unsigned i = 0; i != NumValues; ++i) {
3278       SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3279       Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3280       Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3281       Values[i] = DAG.getNode(
3282           OpCode, getCurSDLoc(),
3283           LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops);
3284     }
3285   }
3286 
3287   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3288                            DAG.getVTList(ValueVTs), Values));
3289 }
3290 
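// For instance, the pair
//   %c = icmp slt i32 %a, %b
//   %s = select i1 %c, i32 %a, i32 %b
// is recognized by matchSelectPattern as SPF_SMIN; when ISD::SMIN is legal or
// custom for the type-legalized i32 and %c has no users other than selects,
// the code above emits a single SMIN node instead of a SETCC plus SELECT.
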
3291 void SelectionDAGBuilder::visitTrunc(const User &I) {
3292   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3293   SDValue N = getValue(I.getOperand(0));
3294   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3295                                                         I.getType());
3296   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3297 }
3298 
3299 void SelectionDAGBuilder::visitZExt(const User &I) {
3300   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3301   // ZExt also can't be a cast to bool for the same reason; nothing much to do.
3302   SDValue N = getValue(I.getOperand(0));
3303   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3304                                                         I.getType());
3305   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3306 }
3307 
3308 void SelectionDAGBuilder::visitSExt(const User &I) {
3309   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3310   // SExt also can't be a cast to bool for the same reason; nothing much to do.
3311   SDValue N = getValue(I.getOperand(0));
3312   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3313                                                         I.getType());
3314   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3315 }
3316 
3317 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3318   // FPTrunc is never a no-op cast, no need to check
3319   SDValue N = getValue(I.getOperand(0));
3320   SDLoc dl = getCurSDLoc();
3321   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3322   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3323   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3324                            DAG.getTargetConstant(
3325                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3326 }
3327 
3328 void SelectionDAGBuilder::visitFPExt(const User &I) {
3329   // FPExt is never a no-op cast, no need to check
3330   SDValue N = getValue(I.getOperand(0));
3331   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3332                                                         I.getType());
3333   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3334 }
3335 
3336 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3337   // FPToUI is never a no-op cast, no need to check
3338   SDValue N = getValue(I.getOperand(0));
3339   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3340                                                         I.getType());
3341   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3342 }
3343 
3344 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3345   // FPToSI is never a no-op cast, no need to check
3346   SDValue N = getValue(I.getOperand(0));
3347   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3348                                                         I.getType());
3349   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3350 }
3351 
3352 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3353   // UIToFP is never a no-op cast, no need to check
3354   SDValue N = getValue(I.getOperand(0));
3355   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3356                                                         I.getType());
3357   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3358 }
3359 
3360 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3361   // SIToFP is never a no-op cast, no need to check
3362   SDValue N = getValue(I.getOperand(0));
3363   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3364                                                         I.getType());
3365   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3366 }
3367 
3368 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3369   // What to do depends on the size of the integer and the size of the pointer.
3370   // We can either truncate, zero extend, or no-op, accordingly.
3371   SDValue N = getValue(I.getOperand(0));
3372   auto &TLI = DAG.getTargetLoweringInfo();
3373   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3374                                                         I.getType());
3375   EVT PtrMemVT =
3376       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3377   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3378   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3379   setValue(&I, N);
3380 }
3381 
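// Sizing example: with 64-bit DAG pointers and a 32-bit pointer memory type,
// 'ptrtoint i8* %p to i16' first truncates %p to its 32-bit memory width and
// then truncates again to i16, whereas 'ptrtoint i8* %p to i64' would
// zero-extend the 32-bit value; visitIntToPtr below performs the mirror image.
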
3382 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3383   // What to do depends on the size of the integer and the size of the pointer.
3384   // We can either truncate, zero extend, or no-op, accordingly.
3385   SDValue N = getValue(I.getOperand(0));
3386   auto &TLI = DAG.getTargetLoweringInfo();
3387   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3388   EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3389   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3390   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3391   setValue(&I, N);
3392 }
3393 
3394 void SelectionDAGBuilder::visitBitCast(const User &I) {
3395   SDValue N = getValue(I.getOperand(0));
3396   SDLoc dl = getCurSDLoc();
3397   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3398                                                         I.getType());
3399 
3400   // BitCast assures us that source and destination are the same size so this is
3401   // either a BITCAST or a no-op.
3402   if (DestVT != N.getValueType())
3403     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3404                              DestVT, N)); // convert types.
3405   // Check if the original LLVM IR operand was a ConstantInt, because
3406   // getValue() might fold any kind of constant expression to an integer
3407   // constant and that is not what we are looking for. Only recognize a
3408   // bitcast of a genuine constant integer as an opaque constant.
3409   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3410     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3411                                  /*isOpaque*/true));
3412   else
3413     setValue(&I, N);            // noop cast.
3414 }
3415 
3416 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3417   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3418   const Value *SV = I.getOperand(0);
3419   SDValue N = getValue(SV);
3420   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3421 
3422   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3423   unsigned DestAS = I.getType()->getPointerAddressSpace();
3424 
3425   if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3426     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3427 
3428   setValue(&I, N);
3429 }
3430 
3431 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3432   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3433   SDValue InVec = getValue(I.getOperand(0));
3434   SDValue InVal = getValue(I.getOperand(1));
3435   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3436                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3437   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3438                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3439                            InVec, InVal, InIdx));
3440 }
3441 
3442 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3443   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3444   SDValue InVec = getValue(I.getOperand(0));
3445   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3446                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3447   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3448                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3449                            InVec, InIdx));
3450 }
3451 
3452 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3453   SDValue Src1 = getValue(I.getOperand(0));
3454   SDValue Src2 = getValue(I.getOperand(1));
3455   ArrayRef<int> Mask;
3456   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
3457     Mask = SVI->getShuffleMask();
3458   else
3459     Mask = cast<ConstantExpr>(I).getShuffleMask();
3460   SDLoc DL = getCurSDLoc();
3461   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3462   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3463   EVT SrcVT = Src1.getValueType();
3464 
3465   if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
3466       VT.isScalableVector()) {
3467     // Canonicalize to a splat of the first element of the first input vector.
3468     SDValue FirstElt =
3469         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
3470                     DAG.getVectorIdxConstant(0, DL));
3471     setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
3472     return;
3473   }
3474 
3475   // For now, we only handle splats for scalable vectors.
3476   // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
3477   // for targets that support a SPLAT_VECTOR for non-scalable vector types.
3478   assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
3479 
3480   unsigned SrcNumElts = SrcVT.getVectorNumElements();
3481   unsigned MaskNumElts = Mask.size();
3482 
3483   if (SrcNumElts == MaskNumElts) {
3484     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3485     return;
3486   }
3487 
3488   // Normalize the shuffle vector since mask and vector length don't match.
3489   if (SrcNumElts < MaskNumElts) {
3490     // The mask is longer than the source vectors. We can concatenate the
3491     // source vectors to make their length match the mask.
3492 
3493     if (MaskNumElts % SrcNumElts == 0) {
3494       // Mask length is a multiple of the source vector length.
3495       // Check if the shuffle is some kind of concatenation of the input
3496       // vectors.
3497       unsigned NumConcat = MaskNumElts / SrcNumElts;
3498       bool IsConcat = true;
3499       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3500       for (unsigned i = 0; i != MaskNumElts; ++i) {
3501         int Idx = Mask[i];
3502         if (Idx < 0)
3503           continue;
3504         // Ensure the indices in each SrcVT sized piece are sequential and that
3505         // the same source is used for the whole piece.
3506         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3507             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3508              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3509           IsConcat = false;
3510           break;
3511         }
3512         // Remember which source this index came from.
3513         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3514       }
3515 
3516       // The shuffle is concatenating multiple vectors together. Just emit
3517       // a CONCAT_VECTORS operation.
3518       if (IsConcat) {
3519         SmallVector<SDValue, 8> ConcatOps;
3520         for (auto Src : ConcatSrcs) {
3521           if (Src < 0)
3522             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3523           else if (Src == 0)
3524             ConcatOps.push_back(Src1);
3525           else
3526             ConcatOps.push_back(Src2);
3527         }
3528         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3529         return;
3530       }
3531     }
3532 
3533     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3534     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3535     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3536                                     PaddedMaskNumElts);
3537 
3538     // Pad both vectors with undefs to make them the same length as the mask.
3539     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3540 
3541     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3542     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3543     MOps1[0] = Src1;
3544     MOps2[0] = Src2;
3545 
3546     Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3547     Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3548 
3549     // Readjust mask for new input vector length.
3550     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3551     for (unsigned i = 0; i != MaskNumElts; ++i) {
3552       int Idx = Mask[i];
3553       if (Idx >= (int)SrcNumElts)
3554         Idx -= SrcNumElts - PaddedMaskNumElts;
3555       MappedOps[i] = Idx;
3556     }
3557 
3558     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3559 
3560     // If the concatenated vector was padded, extract a subvector with the
3561     // correct number of elements.
3562     if (MaskNumElts != PaddedMaskNumElts)
3563       Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3564                            DAG.getVectorIdxConstant(0, DL));
3565 
3566     setValue(&I, Result);
3567     return;
3568   }
3569 
3570   if (SrcNumElts > MaskNumElts) {
3571     // Analyze the access pattern of the vector to see if we can extract
3572     // two subvectors and do the shuffle.
3573     int StartIdx[2] = { -1, -1 };  // Start indices to extract from.
3574     bool CanExtract = true;
3575     for (int Idx : Mask) {
3576       unsigned Input = 0;
3577       if (Idx < 0)
3578         continue;
3579 
3580       if (Idx >= (int)SrcNumElts) {
3581         Input = 1;
3582         Idx -= SrcNumElts;
3583       }
3584 
3585       // If all the indices come from the same MaskNumElts sized portion of
3586       // the sources we can use extract. Also make sure the extract wouldn't
3587       // extract past the end of the source.
3588       int NewStartIdx = alignDown(Idx, MaskNumElts);
3589       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3590           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3591         CanExtract = false;
3592       // Make sure we always update StartIdx as we use it to track if all
3593       // elements are undef.
3594       StartIdx[Input] = NewStartIdx;
3595     }
3596 
3597     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3598       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3599       return;
3600     }
3601     if (CanExtract) {
3602       // Extract appropriate subvector and generate a vector shuffle
3603       for (unsigned Input = 0; Input < 2; ++Input) {
3604         SDValue &Src = Input == 0 ? Src1 : Src2;
3605         if (StartIdx[Input] < 0)
3606           Src = DAG.getUNDEF(VT);
3607         else {
3608           Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3609                             DAG.getVectorIdxConstant(StartIdx[Input], DL));
3610         }
3611       }
3612 
3613       // Calculate new mask.
3614       SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3615       for (int &Idx : MappedOps) {
3616         if (Idx >= (int)SrcNumElts)
3617           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3618         else if (Idx >= 0)
3619           Idx -= StartIdx[0];
3620       }
3621 
3622       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3623       return;
3624     }
3625   }
3626 
3627   // We can't use either concat vectors or extract subvectors, so fall back
3628   // to replacing the shuffle with per-element extracts and a build vector.
3630   EVT EltVT = VT.getVectorElementType();
3631   SmallVector<SDValue,8> Ops;
3632   for (int Idx : Mask) {
3633     SDValue Res;
3634 
3635     if (Idx < 0) {
3636       Res = DAG.getUNDEF(EltVT);
3637     } else {
3638       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3639       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3640 
3641       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
3642                         DAG.getVectorIdxConstant(Idx, DL));
3643     }
3644 
3645     Ops.push_back(Res);
3646   }
3647 
3648   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3649 }
3650 
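// The normalization above, by example (element counts only):
//   v4 inputs, mask <0,1,2,3,4,5,6,7>  -> CONCAT_VECTORS(Src1, Src2)
//   v4 inputs, 8-wide non-concat mask  -> pad both inputs to v8 with undef,
//                                         remap the mask, then shuffle
//   v8 inputs, mask <2,3>              -> EXTRACT_SUBVECTOR of two elements
//                                         at offset 2, then a v2 shuffle
// Anything that fits none of these patterns becomes per-element
// EXTRACT_VECTOR_ELT nodes feeding a BUILD_VECTOR.
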
3651 void SelectionDAGBuilder::visitInsertValue(const User &I) {
3652   ArrayRef<unsigned> Indices;
3653   if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
3654     Indices = IV->getIndices();
3655   else
3656     Indices = cast<ConstantExpr>(&I)->getIndices();
3657 
3658   const Value *Op0 = I.getOperand(0);
3659   const Value *Op1 = I.getOperand(1);
3660   Type *AggTy = I.getType();
3661   Type *ValTy = Op1->getType();
3662   bool IntoUndef = isa<UndefValue>(Op0);
3663   bool FromUndef = isa<UndefValue>(Op1);
3664 
3665   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3666 
3667   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3668   SmallVector<EVT, 4> AggValueVTs;
3669   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3670   SmallVector<EVT, 4> ValValueVTs;
3671   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3672 
3673   unsigned NumAggValues = AggValueVTs.size();
3674   unsigned NumValValues = ValValueVTs.size();
3675   SmallVector<SDValue, 4> Values(NumAggValues);
3676 
3677   // Ignore an insertvalue that produces an empty object.
3678   if (!NumAggValues) {
3679     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3680     return;
3681   }
3682 
3683   SDValue Agg = getValue(Op0);
3684   unsigned i = 0;
3685   // Copy the beginning value(s) from the original aggregate.
3686   for (; i != LinearIndex; ++i)
3687     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3688                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3689   // Copy values from the inserted value(s).
3690   if (NumValValues) {
3691     SDValue Val = getValue(Op1);
3692     for (; i != LinearIndex + NumValValues; ++i)
3693       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3694                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3695   }
3696   // Copy remaining value(s) from the original aggregate.
3697   for (; i != NumAggValues; ++i)
3698     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3699                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3700 
3701   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3702                            DAG.getVTList(AggValueVTs), Values));
3703 }
3704 
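// Worked example: for
//   %r = insertvalue {i32, {float, i64}} %agg, float %f, 1, 0
// the aggregate flattens to three values [i32, float, i64] and
// ComputeLinearIndex yields 1, so Values becomes [agg.0, %f, agg.2], merged
// back into a single MERGE_VALUES node.
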
3705 void SelectionDAGBuilder::visitExtractValue(const User &I) {
3706   ArrayRef<unsigned> Indices;
3707   if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
3708     Indices = EV->getIndices();
3709   else
3710     Indices = cast<ConstantExpr>(&I)->getIndices();
3711 
3712   const Value *Op0 = I.getOperand(0);
3713   Type *AggTy = Op0->getType();
3714   Type *ValTy = I.getType();
3715   bool OutOfUndef = isa<UndefValue>(Op0);
3716 
3717   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3718 
3719   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3720   SmallVector<EVT, 4> ValValueVTs;
3721   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3722 
3723   unsigned NumValValues = ValValueVTs.size();
3724 
3725   // Ignore an extractvalue that produces an empty object.
3726   if (!NumValValues) {
3727     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3728     return;
3729   }
3730 
3731   SmallVector<SDValue, 4> Values(NumValValues);
3732 
3733   SDValue Agg = getValue(Op0);
3734   // Copy out the selected value(s).
3735   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3736     Values[i - LinearIndex] =
3737       OutOfUndef ?
3738         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3739         SDValue(Agg.getNode(), Agg.getResNo() + i);
3740 
3741   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3742                            DAG.getVTList(ValValueVTs), Values));
3743 }
3744 
3745 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3746   Value *Op0 = I.getOperand(0);
3747   // Note that the pointer operand may be a vector of pointers. Take the scalar
3748   // element which holds a pointer.
3749   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3750   SDValue N = getValue(Op0);
3751   SDLoc dl = getCurSDLoc();
3752   auto &TLI = DAG.getTargetLoweringInfo();
3753   MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
3754   MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
3755 
3756   // Normalize a vector GEP: all scalar operands must be converted into splat
3757   // vectors.
3758   bool IsVectorGEP = I.getType()->isVectorTy();
3759   ElementCount VectorElementCount =
3760       IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
3761                   : ElementCount(0, false);
3762 
3763   if (IsVectorGEP && !N.getValueType().isVector()) {
3764     LLVMContext &Context = *DAG.getContext();
3765     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
3766     if (VectorElementCount.Scalable)
3767       N = DAG.getSplatVector(VT, dl, N);
3768     else
3769       N = DAG.getSplatBuildVector(VT, dl, N);
3770   }
3771 
3772   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3773        GTI != E; ++GTI) {
3774     const Value *Idx = GTI.getOperand();
3775     if (StructType *StTy = GTI.getStructTypeOrNull()) {
3776       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3777       if (Field) {
3778         // N = N + Offset
3779         uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3780 
3781         // In an inbounds GEP with an offset that is nonnegative even when
3782         // interpreted as signed, assume there is no unsigned overflow.
3783         SDNodeFlags Flags;
3784         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3785           Flags.setNoUnsignedWrap(true);
3786 
3787         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3788                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3789       }
3790     } else {
3791       // IdxSize is the width of the arithmetic according to IR semantics.
3792       // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
3793       // (and fix up the result later).
3794       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
3795       MVT IdxTy = MVT::getIntegerVT(IdxSize);
3796       TypeSize ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
3797       // We intentionally mask away the high bits here; ElementSize may not
3798       // fit in IdxTy.
3799       APInt ElementMul(IdxSize, ElementSize.getKnownMinSize());
3800       bool ElementScalable = ElementSize.isScalable();
3801 
3802       // If this is a scalar constant or a splat vector of constants,
3803       // handle it quickly.
3804       const auto *C = dyn_cast<Constant>(Idx);
3805       if (C && isa<VectorType>(C->getType()))
3806         C = C->getSplatValue();
3807 
3808       const auto *CI = dyn_cast_or_null<ConstantInt>(C);
3809       if (CI && CI->isZero())
3810         continue;
3811       if (CI && !ElementScalable) {
3812         APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
3813         LLVMContext &Context = *DAG.getContext();
3814         SDValue OffsVal;
3815         if (IsVectorGEP)
3816           OffsVal = DAG.getConstant(
3817               Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
3818         else
3819           OffsVal = DAG.getConstant(Offs, dl, IdxTy);
3820 
3821         // In an inbounds GEP with an offset that is nonnegative even when
3822         // interpreted as signed, assume there is no unsigned overflow.
3823         SDNodeFlags Flags;
3824         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3825           Flags.setNoUnsignedWrap(true);
3826 
3827         OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
3828 
3829         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3830         continue;
3831       }
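           // For example, "getelementptr inbounds i32, i32* %p, i64 3" takes
           // the fast path above: the index folds to a constant byte offset of
           // 3 * 4 = 12, and the ADD is marked nuw because the offset is
           // nonnegative and the GEP is inbounds.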
3832 
3833       // N = N + Idx * ElementMul;
3834       SDValue IdxN = getValue(Idx);
3835 
3836       if (!IdxN.getValueType().isVector() && IsVectorGEP) {
3837         EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(),
3838                                   VectorElementCount);
3839         if (VectorElementCount.Scalable)
3840           IdxN = DAG.getSplatVector(VT, dl, IdxN);
3841         else
3842           IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3843       }
3844 
3845       // If the index is smaller or larger than intptr_t, truncate or extend
3846       // it.
3847       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3848 
3849       if (ElementScalable) {
3850         EVT VScaleTy = N.getValueType().getScalarType();
3851         SDValue VScale = DAG.getNode(
3852             ISD::VSCALE, dl, VScaleTy,
3853             DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
3854         if (IsVectorGEP)
3855           VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
3856         IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
3857       } else {
3858         // If this is a multiply by a power of two, turn it into a shl
3859         // immediately.  This is a very common case.
3860         if (ElementMul != 1) {
3861           if (ElementMul.isPowerOf2()) {
3862             unsigned Amt = ElementMul.logBase2();
3863             IdxN = DAG.getNode(ISD::SHL, dl,
3864                                N.getValueType(), IdxN,
3865                                DAG.getConstant(Amt, dl, IdxN.getValueType()));
3866           } else {
3867             SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
3868                                             IdxN.getValueType());
3869             IdxN = DAG.getNode(ISD::MUL, dl,
3870                                N.getValueType(), IdxN, Scale);
3871           }
3872         }
3873       }
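           // For example, indexing into an array of i64 (ElementMul == 8)
           // emits "shl IdxN, 3" here rather than a multiply.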
3874 
3875       N = DAG.getNode(ISD::ADD, dl,
3876                       N.getValueType(), N, IdxN);
3877     }
3878   }
3879 
3880   if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
3881     N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
3882 
3883   setValue(&I, N);
3884 }
3885 
3886 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3887   // If this is a fixed sized alloca in the entry block of the function,
3888   // allocate it statically on the stack.
3889   if (FuncInfo.StaticAllocaMap.count(&I))
3890     return;   // getValue will auto-populate this.
3891 
3892   SDLoc dl = getCurSDLoc();
3893   Type *Ty = I.getAllocatedType();
3894   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3895   auto &DL = DAG.getDataLayout();
3896   uint64_t TySize = DL.getTypeAllocSize(Ty);
3897   MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
3898 
3899   SDValue AllocSize = getValue(I.getArraySize());
3900 
3901   EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
3902   if (AllocSize.getValueType() != IntPtr)
3903     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
3904 
3905   AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
3906                           AllocSize,
3907                           DAG.getConstant(TySize, dl, IntPtr));
3908 
3909   // Handle alignment.  If the requested alignment is less than or equal to
3910   // the stack alignment, ignore it.  If the size is greater than or equal to
3911   // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
3912   Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
3913   if (*Alignment <= StackAlign)
3914     Alignment = None;
3915 
3916   const uint64_t StackAlignMask = StackAlign.value() - 1U;
3917   // Round the size of the allocation up to the stack alignment size
3918   // by adding SA-1 to the size. This doesn't overflow because we're computing
3919   // an address inside an alloca.
3920   SDNodeFlags Flags;
3921   Flags.setNoUnsignedWrap(true);
3922   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
3923                           DAG.getConstant(StackAlignMask, dl, IntPtr), Flags);
3924 
3925   // Mask out the low bits for alignment purposes.
3926   AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
3927                           DAG.getConstant(~StackAlignMask, dl, IntPtr));
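       // Worked example: with a 16-byte stack alignment (StackAlignMask == 15),
       // an AllocSize of 20 becomes (20 + 15) & ~15 == 32, the next multiple of
       // the stack alignment.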
3928 
3929   SDValue Ops[] = {
3930       getRoot(), AllocSize,
3931       DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
3932   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3933   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
3934   setValue(&I, DSA);
3935   DAG.setRoot(DSA.getValue(1));
3936 
3937   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3938 }
3939 
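     // For example, a load of a first-class aggregate such as
     //   %v = load { i32, i64 }, { i32, i64 }* %p
     // is split below into one load per aggregate element (under a typical
     // 64-bit data layout, at byte offsets 0 and 8), with the results combined
     // by a MERGE_VALUES node and the load chains joined by a TokenFactor.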
3940 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3941   if (I.isAtomic())
3942     return visitAtomicLoad(I);
3943 
3944   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3945   const Value *SV = I.getOperand(0);
3946   if (TLI.supportSwiftError()) {
3947     // Swifterror values can come from either a function parameter with
3948     // swifterror attribute or an alloca with swifterror attribute.
3949     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
3950       if (Arg->hasSwiftErrorAttr())
3951         return visitLoadFromSwiftError(I);
3952     }
3953 
3954     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3955       if (Alloca->isSwiftError())
3956         return visitLoadFromSwiftError(I);
3957     }
3958   }
3959 
3960   SDValue Ptr = getValue(SV);
3961 
3962   Type *Ty = I.getType();
3963   Align Alignment = I.getAlign();
3964 
3965   AAMDNodes AAInfo;
3966   I.getAAMetadata(AAInfo);
3967   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3968 
3969   SmallVector<EVT, 4> ValueVTs, MemVTs;
3970   SmallVector<uint64_t, 4> Offsets;
3971   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
3972   unsigned NumValues = ValueVTs.size();
3973   if (NumValues == 0)
3974     return;
3975 
3976   bool isVolatile = I.isVolatile();
3977 
3978   SDValue Root;
3979   bool ConstantMemory = false;
3980   if (isVolatile)
3981     // Serialize volatile loads with other side effects.
3982     Root = getRoot();
3983   else if (NumValues > MaxParallelChains)
3984     Root = getMemoryRoot();
3985   else if (AA &&
3986            AA->pointsToConstantMemory(MemoryLocation(
3987                SV,
3988                LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
3989                AAInfo))) {
3990     // Do not serialize (non-volatile) loads of constant memory with anything.
3991     Root = DAG.getEntryNode();
3992     ConstantMemory = true;
3993   } else {
3994     // Do not serialize non-volatile loads against each other.
3995     Root = DAG.getRoot();
3996   }
3997 
3998   SDLoc dl = getCurSDLoc();
3999 
4000   if (isVolatile)
4001     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4002 
4003   // An aggregate load cannot wrap around the address space, so offsets to its
4004   // parts don't wrap either.
4005   SDNodeFlags Flags;
4006   Flags.setNoUnsignedWrap(true);
4007 
4008   SmallVector<SDValue, 4> Values(NumValues);
4009   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4010   EVT PtrVT = Ptr.getValueType();
4011 
4012   MachineMemOperand::Flags MMOFlags
4013     = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());
4014 
4015   unsigned ChainI = 0;
4016   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4017     // Serializing loads here may result in excessive register pressure, and
4018     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4019     // could recover a bit by hoisting nodes upward in the chain by recognizing
4020     // they are side-effect free or do not alias. The optimizer should really
4021     // avoid this case by converting large object/array copies to llvm.memcpy
4022     // (MaxParallelChains should always remain as a failsafe).
4023     if (ChainI == MaxParallelChains) {
4024       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4025       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4026                                   makeArrayRef(Chains.data(), ChainI));
4027       Root = Chain;
4028       ChainI = 0;
4029     }
4030     SDValue A = DAG.getNode(ISD::ADD, dl,
4031                             PtrVT, Ptr,
4032                             DAG.getConstant(Offsets[i], dl, PtrVT),
4033                             Flags);
4034 
4035     SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A,
4036                             MachinePointerInfo(SV, Offsets[i]), Alignment,
4037                             MMOFlags, AAInfo, Ranges);
4038     Chains[ChainI] = L.getValue(1);
4039 
4040     if (MemVTs[i] != ValueVTs[i])
4041       L = DAG.getZExtOrTrunc(L, dl, ValueVTs[i]);
4042 
4043     Values[i] = L;
4044   }
4045 
4046   if (!ConstantMemory) {
4047     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4048                                 makeArrayRef(Chains.data(), ChainI));
4049     if (isVolatile)
4050       DAG.setRoot(Chain);
4051     else
4052       PendingLoads.push_back(Chain);
4053   }
4054 
4055   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4056                            DAG.getVTList(ValueVTs), Values));
4057 }
4058 
4059 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4060   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4061          "call visitStoreToSwiftError when backend supports swifterror");
4062 
4063   SmallVector<EVT, 4> ValueVTs;
4064   SmallVector<uint64_t, 4> Offsets;
4065   const Value *SrcV = I.getOperand(0);
4066   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4067                   SrcV->getType(), ValueVTs, &Offsets);
4068   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4069          "expect a single EVT for swifterror");
4070 
4071   SDValue Src = getValue(SrcV);
4072   // Create a virtual register, then update the virtual register.
4073   Register VReg =
4074       SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4075   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4076   // Chain can be getRoot or getControlRoot.
4077   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4078                                       SDValue(Src.getNode(), Src.getResNo()));
4079   DAG.setRoot(CopyNode);
4080 }
4081 
4082 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4083   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4084          "call visitLoadFromSwiftError when backend supports swifterror");
4085 
4086   assert(!I.isVolatile() &&
4087          !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4088          !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4089          "Support volatile, non temporal, invariant for load_from_swift_error");
4090 
4091   const Value *SV = I.getOperand(0);
4092   Type *Ty = I.getType();
4093   AAMDNodes AAInfo;
4094   I.getAAMetadata(AAInfo);
4095   assert(
4096       (!AA ||
4097        !AA->pointsToConstantMemory(MemoryLocation(
4098            SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4099            AAInfo))) &&
4100       "load_from_swift_error should not be constant memory");
4101 
4102   SmallVector<EVT, 4> ValueVTs;
4103   SmallVector<uint64_t, 4> Offsets;
4104   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4105                   ValueVTs, &Offsets);
4106   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4107          "expect a single EVT for swifterror");
4108 
4109   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4110   SDValue L = DAG.getCopyFromReg(
4111       getRoot(), getCurSDLoc(),
4112       SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4113 
4114   setValue(&I, L);
4115 }
4116 
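     // Symmetrically to visitLoad, a store of a first-class aggregate such as
     //   store { i32, i64 } %v, { i32, i64 }* %p
     // is split below into one store per aggregate element, with the resulting
     // chains joined by a TokenFactor that becomes the new root.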
4117 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4118   if (I.isAtomic())
4119     return visitAtomicStore(I);
4120 
4121   const Value *SrcV = I.getOperand(0);
4122   const Value *PtrV = I.getOperand(1);
4123 
4124   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4125   if (TLI.supportSwiftError()) {
4126     // Swifterror values can come from either a function parameter with
4127     // swifterror attribute or an alloca with swifterror attribute.
4128     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4129       if (Arg->hasSwiftErrorAttr())
4130         return visitStoreToSwiftError(I);
4131     }
4132 
4133     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4134       if (Alloca->isSwiftError())
4135         return visitStoreToSwiftError(I);
4136     }
4137   }
4138 
4139   SmallVector<EVT, 4> ValueVTs, MemVTs;
4140   SmallVector<uint64_t, 4> Offsets;
4141   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4142                   SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4143   unsigned NumValues = ValueVTs.size();
4144   if (NumValues == 0)
4145     return;
4146 
4147   // Get the lowered operands. Note that we do this after
4148   // checking if NumValues is zero, because when it is zero
4149   // the operands won't have values in the map.
4150   SDValue Src = getValue(SrcV);
4151   SDValue Ptr = getValue(PtrV);
4152 
4153   SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4154   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4155   SDLoc dl = getCurSDLoc();
4156   Align Alignment = I.getAlign();
4157   AAMDNodes AAInfo;
4158   I.getAAMetadata(AAInfo);
4159 
4160   auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4161 
4162   // An aggregate store cannot wrap around the address space, so offsets to its
4163   // parts don't wrap either.
4164   SDNodeFlags Flags;
4165   Flags.setNoUnsignedWrap(true);
4166 
4167   unsigned ChainI = 0;
4168   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4169     // See visitLoad comments.
4170     if (ChainI == MaxParallelChains) {
4171       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4172                                   makeArrayRef(Chains.data(), ChainI));
4173       Root = Chain;
4174       ChainI = 0;
4175     }
4176     SDValue Add = DAG.getMemBasePlusOffset(Ptr, Offsets[i], dl, Flags);
4177     SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4178     if (MemVTs[i] != ValueVTs[i])
4179       Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4180     SDValue St =
4181         DAG.getStore(Root, dl, Val, Add, MachinePointerInfo(PtrV, Offsets[i]),
4182                      Alignment, MMOFlags, AAInfo);
4183     Chains[ChainI] = St;
4184   }
4185 
4186   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4187                                   makeArrayRef(Chains.data(), ChainI));
4188   DAG.setRoot(StoreNode);
4189 }
4190 
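     // Lowers calls such as
     //   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %val,
     //       <4 x i32>* %p, i32 4, <4 x i1> %mask)
     // and, when IsCompressing is true,
     //   call void @llvm.masked.compressstore.v4i32(<4 x i32> %val, i32* %p,
     //       <4 x i1> %mask)
     // to a masked-store DAG node.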
4191 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4192                                            bool IsCompressing) {
4193   SDLoc sdl = getCurSDLoc();
4194 
4195   auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4196                                MaybeAlign &Alignment) {
4197     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4198     Src0 = I.getArgOperand(0);
4199     Ptr = I.getArgOperand(1);
4200     Alignment =
4201         MaybeAlign(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
4202     Mask = I.getArgOperand(3);
4203   };
4204   auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4205                                     MaybeAlign &Alignment) {
4206     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4207     Src0 = I.getArgOperand(0);
4208     Ptr = I.getArgOperand(1);
4209     Mask = I.getArgOperand(2);
4210     Alignment = None;
4211   };
4212 
4213   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4214   MaybeAlign Alignment;
4215   if (IsCompressing)
4216     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4217   else
4218     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4219 
4220   SDValue Ptr = getValue(PtrOperand);
4221   SDValue Src0 = getValue(Src0Operand);
4222   SDValue Mask = getValue(MaskOperand);
4223   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4224 
4225   EVT VT = Src0.getValueType();
4226   if (!Alignment)
4227     Alignment = DAG.getEVTAlign(VT);
4228 
4229   AAMDNodes AAInfo;
4230   I.getAAMetadata(AAInfo);
4231 
4232   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4233       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
4234       // TODO: Make MachineMemOperands aware of scalable
4235       // vectors.
4236       VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo);
4237   SDValue StoreNode =
4238       DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4239                          ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4240   DAG.setRoot(StoreNode);
4241   setValue(&I, StoreNode);
4242 }
4243 
4244 // Get a uniform base for the Gather/Scatter intrinsic.
4245 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4246 // We try to represent it as a base pointer + vector of indices.
4247 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4248 // The first operand of the GEP may be a single pointer or a vector of pointers.
4249 // Example:
4250 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4251 //  or
4252 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
4253 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4254 //
4255 // When the first GEP operand is a single pointer, it is the uniform base we
4256 // are looking for. If the first operand of the GEP is a splat vector, we
4257 // extract the splat value and use it as the uniform base.
4258 // In all other cases the function returns 'false'.
4259 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4260                            ISD::MemIndexType &IndexType, SDValue &Scale,
4261                            SelectionDAGBuilder *SDB, const BasicBlock *CurBB) {
4262   SelectionDAG& DAG = SDB->DAG;
4263   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4264   const DataLayout &DL = DAG.getDataLayout();
4265 
4266   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4267 
4268   // Handle splat constant pointer.
4269   if (auto *C = dyn_cast<Constant>(Ptr)) {
4270     C = C->getSplatValue();
4271     if (!C)
4272       return false;
4273 
4274     Base = SDB->getValue(C);
4275 
4276     unsigned NumElts = cast<VectorType>(Ptr->getType())->getNumElements();
4277     EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4278     Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4279     IndexType = ISD::SIGNED_SCALED;
4280     Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4281     return true;
4282   }
4283 
4284   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4285   if (!GEP || GEP->getParent() != CurBB)
4286     return false;
4287 
4288   if (GEP->getNumOperands() != 2)
4289     return false;
4290 
4291   const Value *BasePtr = GEP->getPointerOperand();
4292   const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4293 
4294   // Make sure the base is scalar and the index is a vector.
4295   if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4296     return false;
4297 
4298   Base = SDB->getValue(BasePtr);
4299   Index = SDB->getValue(IndexVal);
4300   IndexType = ISD::SIGNED_SCALED;
4301   Scale = DAG.getTargetConstant(
4302               DL.getTypeAllocSize(GEP->getResultElementType()),
4303               SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4304   return true;
4305 }
4306 
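     // Lowers a call such as
     //   call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> %val,
     //       <8 x i32*> %ptrs, i32 4, <8 x i1> %mask)
     // to a masked-scatter node. If getUniformBase succeeds, the addresses are
     // expressed as base + index * scale; otherwise the vector of pointers
     // itself becomes the index, with a zero base and a scale of 1.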
4307 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4308   SDLoc sdl = getCurSDLoc();
4309 
4310   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4311   const Value *Ptr = I.getArgOperand(1);
4312   SDValue Src0 = getValue(I.getArgOperand(0));
4313   SDValue Mask = getValue(I.getArgOperand(3));
4314   EVT VT = Src0.getValueType();
4315   MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
4316   if (!Alignment)
4317     Alignment = DAG.getEVTAlign(VT);
4318   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4319 
4320   AAMDNodes AAInfo;
4321   I.getAAMetadata(AAInfo);
4322 
4323   SDValue Base;
4324   SDValue Index;
4325   ISD::MemIndexType IndexType;
4326   SDValue Scale;
4327   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4328                                     I.getParent());
4329 
4330   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4331   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4332       MachinePointerInfo(AS), MachineMemOperand::MOStore,
4333       // TODO: Make MachineMemOperands aware of scalable
4334       // vectors.
4335       MemoryLocation::UnknownSize, *Alignment, AAInfo);
4336   if (!UniformBase) {
4337     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4338     Index = getValue(Ptr);
4339     IndexType = ISD::SIGNED_SCALED;
4340     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4341   }
4342   SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4343   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4344                                          Ops, MMO, IndexType);
4345   DAG.setRoot(Scatter);
4346   setValue(&I, Scatter);
4347 }
4348 
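     // Lowers calls such as
     //   %r = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(
     //       <4 x float>* %p, i32 4, <4 x i1> %mask, <4 x float> %passthru)
     // and, when IsExpanding is true,
     //   %r = call <4 x float> @llvm.masked.expandload.v4f32(float* %p,
     //       <4 x i1> %mask, <4 x float> %passthru)
     // to a masked-load DAG node.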
4349 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4350   SDLoc sdl = getCurSDLoc();
4351 
4352   auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4353                               MaybeAlign &Alignment) {
4354     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4355     Ptr = I.getArgOperand(0);
4356     Alignment =
4357         MaybeAlign(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
4358     Mask = I.getArgOperand(2);
4359     Src0 = I.getArgOperand(3);
4360   };
4361   auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4362                                  MaybeAlign &Alignment) {
4363     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4364     Ptr = I.getArgOperand(0);
4365     Alignment = None;
4366     Mask = I.getArgOperand(1);
4367     Src0 = I.getArgOperand(2);
4368   };
4369 
4370   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4371   MaybeAlign Alignment;
4372   if (IsExpanding)
4373     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4374   else
4375     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4376 
4377   SDValue Ptr = getValue(PtrOperand);
4378   SDValue Src0 = getValue(Src0Operand);
4379   SDValue Mask = getValue(MaskOperand);
4380   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4381 
4382   EVT VT = Src0.getValueType();
4383   if (!Alignment)
4384     Alignment = DAG.getEVTAlign(VT);
4385 
4386   AAMDNodes AAInfo;
4387   I.getAAMetadata(AAInfo);
4388   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4389 
4390   // Do not serialize masked loads of constant memory with anything.
4391   MemoryLocation ML;
4392   if (VT.isScalableVector())
4393     ML = MemoryLocation(PtrOperand);
4394   else
4395     ML = MemoryLocation(PtrOperand, LocationSize::precise(
4396                            DAG.getDataLayout().getTypeStoreSize(I.getType())),
4397                            AAInfo);
4398   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4399 
4400   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4401 
4402   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4403       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
4404       // TODO: Make MachineMemOperands aware of scalable
4405       // vectors.
4406       VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo, Ranges);
4407 
4408   SDValue Load =
4409       DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4410                         ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4411   if (AddToChain)
4412     PendingLoads.push_back(Load.getValue(1));
4413   setValue(&I, Load);
4414 }
4415 
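     // Lowers a call such as
     //   %r = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(
     //       <8 x i32*> %ptrs, i32 4, <8 x i1> %mask, <8 x i32> %passthru)
     // using the same base/index/scale decomposition as visitMaskedScatter.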
4416 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4417   SDLoc sdl = getCurSDLoc();
4418 
4419   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4420   const Value *Ptr = I.getArgOperand(0);
4421   SDValue Src0 = getValue(I.getArgOperand(3));
4422   SDValue Mask = getValue(I.getArgOperand(2));
4423 
4424   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4425   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4426   MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
4427   if (!Alignment)
4428     Alignment = DAG.getEVTAlign(VT);
4429 
4430   AAMDNodes AAInfo;
4431   I.getAAMetadata(AAInfo);
4432   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4433 
4434   SDValue Root = DAG.getRoot();
4435   SDValue Base;
4436   SDValue Index;
4437   ISD::MemIndexType IndexType;
4438   SDValue Scale;
4439   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4440                                     I.getParent());
4441   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4442   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4443       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
4444       // TODO: Make MachineMemOperands aware of scalable
4445       // vectors.
4446       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
4447 
4448   if (!UniformBase) {
4449     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4450     Index = getValue(Ptr);
4451     IndexType = ISD::SIGNED_SCALED;
4452     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4453   }
4454   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4455   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4456                                        Ops, MMO, IndexType);
4457 
4458   PendingLoads.push_back(Gather.getValue(1));
4459   setValue(&I, Gather);
4460 }
4461 
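     // For example,
     //   %res = cmpxchg i32* %p, i32 %expected, i32 %new seq_cst seq_cst
     // produces a { i32, i1 } pair; the three results of the
     // ATOMIC_CMP_SWAP_WITH_SUCCESS node built below are the loaded value, the
     // success bit, and the output chain.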
4462 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4463   SDLoc dl = getCurSDLoc();
4464   AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4465   AtomicOrdering FailureOrdering = I.getFailureOrdering();
4466   SyncScope::ID SSID = I.getSyncScopeID();
4467 
4468   SDValue InChain = getRoot();
4469 
4470   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4471   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4472 
4473   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4474   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4475 
4476   MachineFunction &MF = DAG.getMachineFunction();
4477   MachineMemOperand *MMO = MF.getMachineMemOperand(
4478       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4479       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
4480       FailureOrdering);
4481 
4482   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4483                                    dl, MemVT, VTs, InChain,
4484                                    getValue(I.getPointerOperand()),
4485                                    getValue(I.getCompareOperand()),
4486                                    getValue(I.getNewValOperand()), MMO);
4487 
4488   SDValue OutChain = L.getValue(2);
4489 
4490   setValue(&I, L);
4491   DAG.setRoot(OutChain);
4492 }
4493 
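     // For example,
     //   %old = atomicrmw add i32* %p, i32 1 seq_cst
     // maps to an ATOMIC_LOAD_ADD node whose result is the value stored at %p
     // before the addition.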
4494 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4495   SDLoc dl = getCurSDLoc();
4496   ISD::NodeType NT;
4497   switch (I.getOperation()) {
4498   default: llvm_unreachable("Unknown atomicrmw operation");
4499   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4500   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
4501   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
4502   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
4503   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4504   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
4505   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
4506   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
4507   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
4508   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4509   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4510   case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4511   case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4512   }
4513   AtomicOrdering Ordering = I.getOrdering();
4514   SyncScope::ID SSID = I.getSyncScopeID();
4515 
4516   SDValue InChain = getRoot();
4517 
4518   auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4519   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4520   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4521 
4522   MachineFunction &MF = DAG.getMachineFunction();
4523   MachineMemOperand *MMO = MF.getMachineMemOperand(
4524       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4525       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
4526 
4527   SDValue L =
4528     DAG.getAtomic(NT, dl, MemVT, InChain,
4529                   getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4530                   MMO);
4531 
4532   SDValue OutChain = L.getValue(1);
4533 
4534   setValue(&I, L);
4535   DAG.setRoot(OutChain);
4536 }
4537 
4538 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4539   SDLoc dl = getCurSDLoc();
4540   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4541   SDValue Ops[3];
4542   Ops[0] = getRoot();
4543   Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4544                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4545   Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4546                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4547   DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4548 }
4549 
4550 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4551   SDLoc dl = getCurSDLoc();
4552   AtomicOrdering Order = I.getOrdering();
4553   SyncScope::ID SSID = I.getSyncScopeID();
4554 
4555   SDValue InChain = getRoot();
4556 
4557   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4558   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4559   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4560 
4561   if (!TLI.supportsUnalignedAtomics() &&
4562       I.getAlignment() < MemVT.getSizeInBits() / 8)
4563     report_fatal_error("Cannot generate unaligned atomic load");
4564 
4565   auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());
4566 
4567   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4568       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4569       I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
4570 
4571   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4572 
4573   SDValue Ptr = getValue(I.getPointerOperand());
4574 
4575   if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
4576     // TODO: Once this is better exercised by tests, it should be merged with
4577     // the normal path for loads to prevent future divergence.
4578     SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
4579     if (MemVT != VT)
4580       L = DAG.getPtrExtOrTrunc(L, dl, VT);
4581 
4582     setValue(&I, L);
4583     SDValue OutChain = L.getValue(1);
4584     if (!I.isUnordered())
4585       DAG.setRoot(OutChain);
4586     else
4587       PendingLoads.push_back(OutChain);
4588     return;
4589   }
4590 
4591   SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4592                             Ptr, MMO);
4593 
4594   SDValue OutChain = L.getValue(1);
4595   if (MemVT != VT)
4596     L = DAG.getPtrExtOrTrunc(L, dl, VT);
4597 
4598   setValue(&I, L);
4599   DAG.setRoot(OutChain);
4600 }
4601 
4602 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4603   SDLoc dl = getCurSDLoc();
4604 
4605   AtomicOrdering Ordering = I.getOrdering();
4606   SyncScope::ID SSID = I.getSyncScopeID();
4607 
4608   SDValue InChain = getRoot();
4609 
4610   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4611   EVT MemVT =
4612       TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4613 
4614   if (I.getAlignment() < MemVT.getSizeInBits() / 8)
4615     report_fatal_error("Cannot generate unaligned atomic store");
4616 
4617   auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4618 
4619   MachineFunction &MF = DAG.getMachineFunction();
4620   MachineMemOperand *MMO = MF.getMachineMemOperand(
4621       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4622       I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
4623 
4624   SDValue Val = getValue(I.getValueOperand());
4625   if (Val.getValueType() != MemVT)
4626     Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4627   SDValue Ptr = getValue(I.getPointerOperand());
4628 
4629   if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
4630     // TODO: Once this is better exercised by tests, it should be merged with
4631     // the normal path for stores to prevent future divergence.
4632     SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
4633     DAG.setRoot(S);
4634     return;
4635   }
4636   SDValue OutChain = DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain,
4637                                    Ptr, Val, MMO);
4638 
4640   DAG.setRoot(OutChain);
4641 }
4642 
4643 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4644 /// node.
4645 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4646                                                unsigned Intrinsic) {
4647   // Ignore the callsite's attributes. A specific call site may be marked with
4648   // readnone, but the lowering code will expect the chain based on the
4649   // definition.
4650   const Function *F = I.getCalledFunction();
4651   bool HasChain = !F->doesNotAccessMemory();
4652   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4653 
4654   // Build the operand list.
4655   SmallVector<SDValue, 8> Ops;
4656   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4657     if (OnlyLoad) {
4658       // We don't need to serialize loads against other loads.
4659       Ops.push_back(DAG.getRoot());
4660     } else {
4661       Ops.push_back(getRoot());
4662     }
4663   }
4664 
4665   // Info is set by getTgtMemIntrinsic.
4666   TargetLowering::IntrinsicInfo Info;
4667   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4668   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4669                                                DAG.getMachineFunction(),
4670                                                Intrinsic);
4671 
4672   // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4673   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4674       Info.opc == ISD::INTRINSIC_W_CHAIN)
4675     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4676                                         TLI.getPointerTy(DAG.getDataLayout())));
4677 
4678   // Add all operands of the call to the operand list.
4679   for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4680     const Value *Arg = I.getArgOperand(i);
4681     if (!I.paramHasAttr(i, Attribute::ImmArg)) {
4682       Ops.push_back(getValue(Arg));
4683       continue;
4684     }
4685 
4686     // Use TargetConstant instead of a regular constant for immarg.
4687     EVT VT = TLI.getValueType(*DL, Arg->getType(), true);
4688     if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
4689       assert(CI->getBitWidth() <= 64 &&
4690              "large intrinsic immediates not handled");
4691       Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
4692     } else {
4693       Ops.push_back(
4694           DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
4695     }
4696   }
4697 
4698   SmallVector<EVT, 4> ValueVTs;
4699   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4700 
4701   if (HasChain)
4702     ValueVTs.push_back(MVT::Other);
4703 
4704   SDVTList VTs = DAG.getVTList(ValueVTs);
4705 
4706   // Create the node.
4707   SDValue Result;
4708   if (IsTgtIntrinsic) {
4709     // This is a target intrinsic that touches memory.
4710     AAMDNodes AAInfo;
4711     I.getAAMetadata(AAInfo);
4712     Result =
4713         DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
4714                                 MachinePointerInfo(Info.ptrVal, Info.offset),
4715                                 Info.align, Info.flags, Info.size, AAInfo);
4716   } else if (!HasChain) {
4717     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4718   } else if (!I.getType()->isVoidTy()) {
4719     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4720   } else {
4721     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4722   }
4723 
4724   if (HasChain) {
4725     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4726     if (OnlyLoad)
4727       PendingLoads.push_back(Chain);
4728     else
4729       DAG.setRoot(Chain);
4730   }
4731 
4732   if (!I.getType()->isVoidTy()) {
4733     if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4734       EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4735       Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4736     } else
4737       Result = lowerRangeToAssertZExt(DAG, I, Result);
4738 
4739     setValue(&I, Result);
4740   }
4741 }
4742 
4743 /// GetSignificand - Get the significand and build it into a floating-point
4744 /// number with exponent of 1:
4745 ///
4746 ///   Op = (Op & 0x007fffff) | 0x3f800000;
4747 ///
4748 /// where Op is the i32 bit representation of the floating-point value.
4749 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4750   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4751                            DAG.getConstant(0x007fffff, dl, MVT::i32));
4752   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4753                            DAG.getConstant(0x3f800000, dl, MVT::i32));
4754   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4755 }
4756 
4757 /// GetExponent - Get the exponent:
4758 ///
4759 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4760 ///
4761 /// where Op is the i32 bit representation of the floating-point value.
4762 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4763                            const TargetLowering &TLI, const SDLoc &dl) {
4764   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4765                            DAG.getConstant(0x7f800000, dl, MVT::i32));
4766   SDValue t1 = DAG.getNode(
4767       ISD::SRL, dl, MVT::i32, t0,
4768       DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4769   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4770                            DAG.getConstant(127, dl, MVT::i32));
4771   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4772 }
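     // Worked example for the two helpers above: for Op = 0x40490fdb
     // (3.14159274f), GetSignificand computes
     // (0x40490fdb & 0x007fffff) | 0x3f800000 = 0x3fc90fdb = 1.57079637f, and
     // GetExponent computes ((0x40490fdb & 0x7f800000) >> 23) - 127 = 1,
     // recovering pi as 1.57079637 * 2^1.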
4773 
4774 /// getF32Constant - Get 32-bit floating point constant.
4775 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4776                               const SDLoc &dl) {
4777   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4778                            MVT::f32);
4779 }
4780 
4781 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4782                                        SelectionDAG &DAG) {
4783   // TODO: What fast-math-flags should be set on the floating-point nodes?
4784 
4785   //   IntegerPartOfX = (int32_t)t0;
4786   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4787 
4788   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
4789   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4790   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4791 
4792   //   IntegerPartOfX <<= 23;
4793   IntegerPartOfX = DAG.getNode(
4794       ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4795       DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4796                                   DAG.getDataLayout())));
4797 
4798   SDValue TwoToFractionalPartOfX;
4799   if (LimitFloatPrecision <= 6) {
4800     // For floating-point precision of 6:
4801     //
4802     //   TwoToFractionalPartOfX =
4803     //     0.997535578f +
4804     //       (0.735607626f + 0.252464424f * x) * x;
4805     //
4806     // error 0.0144103317, which is 6 bits
4807     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4808                              getF32Constant(DAG, 0x3e814304, dl));
4809     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4810                              getF32Constant(DAG, 0x3f3c50c8, dl));
4811     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4812     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4813                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
4814   } else if (LimitFloatPrecision <= 12) {
4815     // For floating-point precision of 12:
4816     //
4817     //   TwoToFractionalPartOfX =
4818     //     0.999892986f +
4819     //       (0.696457318f +
4820     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4821     //
4822     // error 0.000107046256, which is 13 to 14 bits
4823     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4824                              getF32Constant(DAG, 0x3da235e3, dl));
4825     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4826                              getF32Constant(DAG, 0x3e65b8f3, dl));
4827     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4828     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4829                              getF32Constant(DAG, 0x3f324b07, dl));
4830     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4831     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4832                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
4833   } else { // LimitFloatPrecision <= 18
4834     // For floating-point precision of 18:
4835     //
4836     //   TwoToFractionalPartOfX =
4837     //     0.999999982f +
4838     //       (0.693148872f +
4839     //         (0.240227044f +
4840     //           (0.554906021e-1f +
4841     //             (0.961591928e-2f +
4842     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4843     // error 2.47208000*10^(-7), which is better than 18 bits
4844     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4845                              getF32Constant(DAG, 0x3924b03e, dl));
4846     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4847                              getF32Constant(DAG, 0x3ab24b87, dl));
4848     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4849     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4850                              getF32Constant(DAG, 0x3c1d8c17, dl));
4851     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4852     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4853                              getF32Constant(DAG, 0x3d634a1d, dl));
4854     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4855     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4856                              getF32Constant(DAG, 0x3e75fe14, dl));
4857     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4858     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4859                               getF32Constant(DAG, 0x3f317234, dl));
4860     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4861     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4862                                          getF32Constant(DAG, 0x3f800000, dl));
4863   }
4864 
4865   // Add the exponent into the result in integer domain.
4866   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4867   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4868                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4869 }
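     // Worked example: for t0 = 3.5, IntegerPartOfX = 3 and X = 0.5. The
     // selected polynomial approximates 2^0.5 = 1.41421, and adding 3 << 23 to
     // its bit pattern scales the result by 2^3 in the exponent field, giving
     // 2^3.5 = 11.3137.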
4870 
4871 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
4872 /// limited-precision mode.
4873 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4874                          const TargetLowering &TLI) {
4875   if (Op.getValueType() == MVT::f32 &&
4876       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4877 
4878     // Put the exponent in the right bit position for later addition to the
4879     // final result:
4880     //
4881     // t0 = Op * log2(e)
4882 
4883     // TODO: What fast-math-flags should be set here?
4884     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4885                              DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
4886     return getLimitedPrecisionExp2(t0, dl, DAG);
4887   }
4888 
4889   // No special expansion.
4890   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4891 }
4892 
4893 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4894 /// limited-precision mode.
4895 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4896                          const TargetLowering &TLI) {
4897   // TODO: What fast-math-flags should be set on the floating-point nodes?
4898 
4899   if (Op.getValueType() == MVT::f32 &&
4900       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4901     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4902 
4903     // Scale the exponent by log(2).
4904     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4905     SDValue LogOfExponent =
4906         DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4907                     DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
4908 
4909     // Get the significand and build it into a floating-point number with
4910     // exponent of 1.
4911     SDValue X = GetSignificand(DAG, Op1, dl);
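         // At this point Op = 2^Exp * X with X in [1, 2), so
         // log(Op) = Exp * log(2) + log(X) = LogOfExponent + LogOfMantissa.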
4912 
4913     SDValue LogOfMantissa;
4914     if (LimitFloatPrecision <= 6) {
4915       // For floating-point precision of 6:
4916       //
4917       //   LogOfMantissa =
4918       //     -1.1609546f +
4919       //       (1.4034025f - 0.23903021f * x) * x;
4920       //
4921       // error 0.0034276066, which is better than 8 bits
4922       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4923                                getF32Constant(DAG, 0xbe74c456, dl));
4924       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4925                                getF32Constant(DAG, 0x3fb3a2b1, dl));
4926       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4927       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4928                                   getF32Constant(DAG, 0x3f949a29, dl));
4929     } else if (LimitFloatPrecision <= 12) {
4930       // For floating-point precision of 12:
4931       //
4932       //   LogOfMantissa =
4933       //     -1.7417939f +
4934       //       (2.8212026f +
4935       //         (-1.4699568f +
4936       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4937       //
4938       // error 0.000061011436, which is 14 bits
4939       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4940                                getF32Constant(DAG, 0xbd67b6d6, dl));
4941       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4942                                getF32Constant(DAG, 0x3ee4f4b8, dl));
4943       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4944       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4945                                getF32Constant(DAG, 0x3fbc278b, dl));
4946       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4947       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4948                                getF32Constant(DAG, 0x40348e95, dl));
4949       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4950       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4951                                   getF32Constant(DAG, 0x3fdef31a, dl));
4952     } else { // LimitFloatPrecision <= 18
4953       // For floating-point precision of 18:
4954       //
4955       //   LogOfMantissa =
4956       //     -2.1072184f +
4957       //       (4.2372794f +
4958       //         (-3.7029485f +
4959       //           (2.2781945f +
4960       //             (-0.87823314f +
4961       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4962       //
4963       // error 0.0000023660568, which is better than 18 bits
4964       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4965                                getF32Constant(DAG, 0xbc91e5ac, dl));
4966       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4967                                getF32Constant(DAG, 0x3e4350aa, dl));
4968       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4969       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4970                                getF32Constant(DAG, 0x3f60d3e3, dl));
4971       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4972       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4973                                getF32Constant(DAG, 0x4011cdf0, dl));
4974       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4975       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4976                                getF32Constant(DAG, 0x406cfd1c, dl));
4977       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4978       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4979                                getF32Constant(DAG, 0x408797cb, dl));
4980       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4981       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4982                                   getF32Constant(DAG, 0x4006dcab, dl));
4983     }
4984 
4985     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4986   }
4987 
4988   // No special expansion.
4989   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4990 }
4991 
4992 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4993 /// limited-precision mode.
4994 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4995                           const TargetLowering &TLI) {
4996   // TODO: What fast-math-flags should be set on the floating-point nodes?
4997 
4998   if (Op.getValueType() == MVT::f32 &&
4999       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5000     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5001 
5002     // Get the exponent.
5003     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5004 
5005     // Get the significand and build it into a floating-point number with
5006     // exponent of 1.
5007     SDValue X = GetSignificand(DAG, Op1, dl);
5008 
5009     // Different possible minimax approximations of significand in
5010     // floating-point for various degrees of accuracy over [1,2].
5011     SDValue Log2ofMantissa;
5012     if (LimitFloatPrecision <= 6) {
5013       // For floating-point precision of 6:
5014       //
5015       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5016       //
5017       // error 0.0049451742, which is more than 7 bits
5018       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5019                                getF32Constant(DAG, 0xbeb08fe0, dl));
5020       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5021                                getF32Constant(DAG, 0x40019463, dl));
5022       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5023       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5024                                    getF32Constant(DAG, 0x3fd6633d, dl));
5025     } else if (LimitFloatPrecision <= 12) {
5026       // For floating-point precision of 12:
5027       //
5028       //   Log2ofMantissa =
5029       //     -2.51285454f +
5030       //       (4.07009056f +
5031       //         (-2.12067489f +
5032       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5033       //
5034       // error 0.0000876136000, which is better than 13 bits
5035       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5036                                getF32Constant(DAG, 0xbda7262e, dl));
5037       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5038                                getF32Constant(DAG, 0x3f25280b, dl));
5039       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5040       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5041                                getF32Constant(DAG, 0x4007b923, dl));
5042       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5043       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5044                                getF32Constant(DAG, 0x40823e2f, dl));
5045       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5046       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5047                                    getF32Constant(DAG, 0x4020d29c, dl));
5048     } else { // LimitFloatPrecision <= 18
5049       // For floating-point precision of 18:
5050       //
5051       //   Log2ofMantissa =
5052       //     -3.0400495f +
5053       //       (6.1129976f +
5054       //         (-5.3420409f +
5055       //           (3.2865683f +
5056       //             (-1.2669343f +
5057       //               (0.27515199f -
5058       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5059       //
5060       // error 0.0000018516, which is better than 18 bits
5061       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5062                                getF32Constant(DAG, 0xbcd2769e, dl));
5063       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5064                                getF32Constant(DAG, 0x3e8ce0b9, dl));
5065       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5066       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5067                                getF32Constant(DAG, 0x3fa22ae7, dl));
5068       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5069       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5070                                getF32Constant(DAG, 0x40525723, dl));
5071       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5072       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5073                                getF32Constant(DAG, 0x40aaf200, dl));
5074       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5075       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5076                                getF32Constant(DAG, 0x40c39dad, dl));
5077       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5078       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5079                                    getF32Constant(DAG, 0x4042902c, dl));
5080     }
5081 
5082     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5083   }
5084 
5085   // No special expansion.
5086   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
5087 }
5088 
5089 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5090 /// limited-precision mode.
5091 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5092                            const TargetLowering &TLI) {
5093   // TODO: What fast-math-flags should be set on the floating-point nodes?
5094 
5095   if (Op.getValueType() == MVT::f32 &&
5096       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5097     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5098 
5099     // Scale the exponent by log10(2) [0.30102999f].
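    //
    // For illustration, this is the usual base-split identity: writing
    // x = 2^e * m with m in [1, 2) gives
    //   log10(x) = e * log10(2) + log10(m),
    // where the scaled exponent is the first term and Log10ofMantissa below
    // approximates the second.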
5100     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5101     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5102                                         getF32Constant(DAG, 0x3e9a209a, dl));
5103 
5104     // Get the significand and build it into a floating-point number with
5105     // exponent of 1.
5106     SDValue X = GetSignificand(DAG, Op1, dl);
5107 
5108     SDValue Log10ofMantissa;
5109     if (LimitFloatPrecision <= 6) {
5110       // For floating-point precision of 6:
5111       //
5112       //   Log10ofMantissa =
5113       //     -0.50419619f +
5114       //       (0.60948995f - 0.10380950f * x) * x;
5115       //
5116       // error 0.0014886165, which is 6 bits
5117       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5118                                getF32Constant(DAG, 0xbdd49a13, dl));
5119       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5120                                getF32Constant(DAG, 0x3f1c0789, dl));
5121       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5122       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5123                                     getF32Constant(DAG, 0x3f011300, dl));
5124     } else if (LimitFloatPrecision <= 12) {
5125       // For floating-point precision of 12:
5126       //
5127       //   Log10ofMantissa =
5128       //     -0.64831180f +
5129       //       (0.91751397f +
5130       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5131       //
5132       // error 0.00019228036, which is better than 12 bits
5133       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5134                                getF32Constant(DAG, 0x3d431f31, dl));
5135       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5136                                getF32Constant(DAG, 0x3ea21fb2, dl));
5137       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5138       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5139                                getF32Constant(DAG, 0x3f6ae232, dl));
5140       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5141       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5142                                     getF32Constant(DAG, 0x3f25f7c3, dl));
5143     } else { // LimitFloatPrecision <= 18
5144       // For floating-point precision of 18:
5145       //
5146       //   Log10ofMantissa =
5147       //     -0.84299375f +
5148       //       (1.5327582f +
5149       //         (-1.0688956f +
5150       //           (0.49102474f +
5151       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5152       //
5153       // error 0.0000037995730, which is better than 18 bits
5154       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5155                                getF32Constant(DAG, 0x3c5d51ce, dl));
5156       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5157                                getF32Constant(DAG, 0x3e00685a, dl));
5158       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5159       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5160                                getF32Constant(DAG, 0x3efb6798, dl));
5161       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5162       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5163                                getF32Constant(DAG, 0x3f88d192, dl));
5164       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5165       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5166                                getF32Constant(DAG, 0x3fc4316c, dl));
5167       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5168       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5169                                     getF32Constant(DAG, 0x3f57ce70, dl));
5170     }
5171 
5172     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5173   }
5174 
5175   // No special expansion.
5176   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
5177 }
5178 
5179 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5180 /// limited-precision mode.
5181 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5182                           const TargetLowering &TLI) {
5183   if (Op.getValueType() == MVT::f32 &&
5184       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5185     return getLimitedPrecisionExp2(Op, dl, DAG);
5186 
5187   // No special expansion.
5188   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
5189 }
5190 
5191 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
5192 /// limited-precision mode when the base is exactly 10.0f.
5193 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5194                          SelectionDAG &DAG, const TargetLowering &TLI) {
5195   bool IsExp10 = false;
5196   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5197       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5198     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5199       APFloat Ten(10.0f);
5200       IsExp10 = LHSC->isExactlyValue(Ten);
5201     }
5202   }
5203 
5204   // TODO: What fast-math-flags should be set on the FMUL node?
5205   if (IsExp10) {
5206     // Put the exponent in the right bit position for later addition to the
5207     // final result:
5208     //
5209     //   #define LOG2OF10 3.3219281f
5210     //   t0 = Op * LOG2OF10;
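    //
    // For illustration: 10^y = (2^log2(10))^y = 2^(y * log2(10)), so
    // pow(10.0f, y) reduces to exp2 of the scaled exponent computed here.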
5211     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5212                              getF32Constant(DAG, 0x40549a78, dl));
5213     return getLimitedPrecisionExp2(t0, dl, DAG);
5214   }
5215 
5216   // No special expansion.
5217   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
5218 }
5219 
5220 /// ExpandPowI - Expand an llvm.powi intrinsic.
5221 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5222                           SelectionDAG &DAG) {
5223   // If RHS is a constant, we can expand this out to a multiplication tree;
5224   // otherwise we end up lowering to a call to __powidf2 (for example).  When
5225   // optimizing for size, we only do this if the expansion would produce a
5226   // small number of multiplies; otherwise we fall back to the libcall.
5227   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5228     // Get the exponent as a positive value.
5229     unsigned Val = RHSC->getSExtValue();
5230     if ((int)Val < 0) Val = -Val;
5231 
5232     // powi(x, 0) -> 1.0
5233     if (Val == 0)
5234       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5235 
5236     bool OptForSize = DAG.shouldOptForSize();
5237     if (!OptForSize ||
5238         // If optimizing for size, don't insert too many multiplies.
5239         // This inserts up to 5 multiplies.
5240         countPopulation(Val) + Log2_32(Val) < 7) {
5241       // We use the simple binary decomposition method to generate the multiply
5242       // sequence.  There are more optimal ways to do this (for example,
5243       // powi(x,15) generates one more multiply than it should), but this has
5244       // the benefit of being both really simple and much better than a libcall.
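      //
      // Worked example: powi(x, 13), i.e. Val = 13 = 0b1101. Scanning bits
      // from LSB to MSB builds Res = x * x^4 * x^8 = x^13 using three live
      // squarings (x^2, x^4, x^8) and two merges into Res; the final dead
      // squaring to x^16 is cleaned up later.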
5245       SDValue Res;  // Logically starts equal to 1.0
5246       SDValue CurSquare = LHS;
5247       // TODO: Intrinsics should have fast-math-flags that propagate to these
5248       // nodes.
5249       while (Val) {
5250         if (Val & 1) {
5251           if (Res.getNode())
5252             Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5253           else
5254             Res = CurSquare;  // 1.0*CurSquare.
5255         }
5256 
5257         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5258                                 CurSquare, CurSquare);
5259         Val >>= 1;
5260       }
5261 
5262       // If the original was negative, take the reciprocal (e.g. 1/(x*x*x)).
5263       if (RHSC->getSExtValue() < 0)
5264         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5265                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5266       return Res;
5267     }
5268   }
5269 
5270   // Otherwise, expand to a libcall.
5271   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5272 }
5273 
5274 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5275                             SDValue LHS, SDValue RHS, SDValue Scale,
5276                             SelectionDAG &DAG, const TargetLowering &TLI) {
5277   EVT VT = LHS.getValueType();
5278   bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5279   bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5280   LLVMContext &Ctx = *DAG.getContext();
5281 
5282   // If the type is legal but the operation isn't, this node might survive all
5283   // the way to operation legalization. If we end up there and we do not have
5284   // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5285   // node.
5286 
5287   // Coax the legalizer into expanding the node during type legalization instead
5288   // by bumping the size by one bit. This will force it to Promote, enabling the
5289   // early expansion and avoiding the need to expand later.
5290 
5291   // We don't have to do this if Scale is 0; that can always be expanded, unless
5292   // it's a saturating signed operation. Those can experience true integer
5293   // division overflow, a case which we must avoid.
5294 
5295   // FIXME: We wouldn't have to do this (or any of the early
5296   // expansion/promotion) if it was possible to expand a libcall of an
5297   // illegal type during operation legalization. But it's not, so things
5298   // get a bit hacky.
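  //
  // Rough illustration of the promoted path: for an i32 sdiv.fix.sat whose
  // operation is not legal, we widen to i33, sign-extend both operands,
  // shift the LHS up by one bit so saturation clamps at the original width,
  // emit the i33 node, shift the result back down, and truncate to i32.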
5299   unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
5300   if ((ScaleInt > 0 || (Saturating && Signed)) &&
5301       (TLI.isTypeLegal(VT) ||
5302        (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5303     TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5304         Opcode, VT, ScaleInt);
5305     if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5306       EVT PromVT;
5307       if (VT.isScalarInteger())
5308         PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5309       else if (VT.isVector()) {
5310         PromVT = VT.getVectorElementType();
5311         PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5312         PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5313       } else
5314         llvm_unreachable("Wrong VT for DIVFIX?");
5315       if (Signed) {
5316         LHS = DAG.getSExtOrTrunc(LHS, DL, PromVT);
5317         RHS = DAG.getSExtOrTrunc(RHS, DL, PromVT);
5318       } else {
5319         LHS = DAG.getZExtOrTrunc(LHS, DL, PromVT);
5320         RHS = DAG.getZExtOrTrunc(RHS, DL, PromVT);
5321       }
5322       EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5323       // For saturating operations, we need to shift up the LHS to get the
5324       // proper saturation width, and then shift down again afterwards.
5325       if (Saturating)
5326         LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5327                           DAG.getConstant(1, DL, ShiftTy));
5328       SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5329       if (Saturating)
5330         Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5331                           DAG.getConstant(1, DL, ShiftTy));
5332       return DAG.getZExtOrTrunc(Res, DL, VT);
5333     }
5334   }
5335 
5336   return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5337 }
5338 
5339 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5340 // bitcasted, or split argument. Returns a list of <Register, size in bits>.
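// For example, on a 32-bit target an i64 argument passed in two registers
// typically reaches here as a BUILD_PAIR of two CopyFromReg nodes; recursing
// through the pair yields both 32-bit registers with their sizes.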
5341 static void
5342 getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
5343                      const SDValue &N) {
5344   switch (N.getOpcode()) {
5345   case ISD::CopyFromReg: {
5346     SDValue Op = N.getOperand(1);
5347     Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5348                       Op.getValueType().getSizeInBits());
5349     return;
5350   }
5351   case ISD::BITCAST:
5352   case ISD::AssertZext:
5353   case ISD::AssertSext:
5354   case ISD::TRUNCATE:
5355     getUnderlyingArgRegs(Regs, N.getOperand(0));
5356     return;
5357   case ISD::BUILD_PAIR:
5358   case ISD::BUILD_VECTOR:
5359   case ISD::CONCAT_VECTORS:
5360     for (SDValue Op : N->op_values())
5361       getUnderlyingArgRegs(Regs, Op);
5362     return;
5363   default:
5364     return;
5365   }
5366 }
5367 
5368 /// If the DbgValueInst is a dbg_value of a function argument, create the
5369 /// corresponding DBG_VALUE machine instruction for it now.  At the end of
5370 /// instruction selection, they will be inserted into the entry BB.
5371 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5372     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5373     DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
5374   const Argument *Arg = dyn_cast<Argument>(V);
5375   if (!Arg)
5376     return false;
5377 
5378   if (!IsDbgDeclare) {
5379     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5380     // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5381     // the entry block.
5382     bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5383     if (!IsInEntryBlock)
5384       return false;
5385 
5386     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5387     // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
5388     // variable that is also a parameter of the current function.
5389     //
5390     // However, if we are already at the top of the entry block, we can still
5391     // emit using ArgDbgValue. This catches situations where the dbg.value
5392     // refers to an argument that isn't used in the entry block: any
5393     // CopyToReg node would be optimized out, and the only way to express
5394     // this DBG_VALUE is by using the physical register (or frame index) as
5395     // done in this method. In short, we emit as ArgDbgValue either when the
5396     // variable is an input parameter of the current function or when we are
5397     // still in the prologue, and only for dbg.values found in the entry
5398     // block.
5399     bool VariableIsFunctionInputArg = Variable->isParameter() &&
5400         !DL->getInlinedAt();
5401     bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5402     if (!IsInPrologue && !VariableIsFunctionInputArg)
5403       return false;
5404 
5405     // Here we assume that an IR-level function argument can only be used to
5406     // describe one source-level input parameter. If, for example, we have
5407     // source code like this
5408     //
5409     //    struct A { long x, y; };
5410     //    void foo(struct A a, long b) {
5411     //      ...
5412     //      b = a.x;
5413     //      ...
5414     //    }
5415     //
5416     // and IR like this
5417     //
5418     //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
5419     //  entry:
5420     //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5421     //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5422     //    call void @llvm.dbg.value(metadata i32 %b, "b",
5423     //    ...
5424     //    call void @llvm.dbg.value(metadata i32 %a1, "b"
5425     //    ...
5426     //
5427     // then the last dbg.value describes the parameter "b" using a value that
5428     // is an argument. But since we have already used %a1 to describe a
5429     // parameter, we should not handle that last dbg.value here (doing so
5430     // would incorrectly hoist the DBG_VALUE to the function entry).
5431     // Note that we allow one dbg.value per IR-level argument, to accommodate
5432     // the fragment situation above.
5433     if (VariableIsFunctionInputArg) {
5434       unsigned ArgNo = Arg->getArgNo();
5435       if (ArgNo >= FuncInfo.DescribedArgs.size())
5436         FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5437       else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5438         return false;
5439       FuncInfo.DescribedArgs.set(ArgNo);
5440     }
5441   }
5442 
5443   MachineFunction &MF = DAG.getMachineFunction();
5444   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5445 
5446   bool IsIndirect = false;
5447   Optional<MachineOperand> Op;
5448   // Some arguments' frame indices are recorded during argument lowering.
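  // getArgumentFrameIndex returns std::numeric_limits<int>::max() as its
  // "no frame index" sentinel, hence the check below.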
5449   int FI = FuncInfo.getArgumentFrameIndex(Arg);
5450   if (FI != std::numeric_limits<int>::max())
5451     Op = MachineOperand::CreateFI(FI);
5452 
5453   SmallVector<std::pair<unsigned, unsigned>, 8> ArgRegsAndSizes;
5454   if (!Op && N.getNode()) {
5455     getUnderlyingArgRegs(ArgRegsAndSizes, N);
5456     Register Reg;
5457     if (ArgRegsAndSizes.size() == 1)
5458       Reg = ArgRegsAndSizes.front().first;
5459 
5460     if (Reg && Reg.isVirtual()) {
5461       MachineRegisterInfo &RegInfo = MF.getRegInfo();
5462       Register PR = RegInfo.getLiveInPhysReg(Reg);
5463       if (PR)
5464         Reg = PR;
5465     }
5466     if (Reg) {
5467       Op = MachineOperand::CreateReg(Reg, false);
5468       IsIndirect = IsDbgDeclare;
5469     }
5470   }
5471 
5472   if (!Op && N.getNode()) {
5473     // Check if frame index is available.
5474     SDValue LCandidate = peekThroughBitcasts(N);
5475     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5476       if (FrameIndexSDNode *FINode =
5477           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5478         Op = MachineOperand::CreateFI(FINode->getIndex());
5479   }
5480 
5481   if (!Op) {
5482     // Emit a separate DBG_VALUE for each register piece of the split value.
5483     auto splitMultiRegDbgValue
5484       = [&](ArrayRef<std::pair<unsigned, unsigned>> SplitRegs) {
5485       unsigned Offset = 0;
5486       for (auto RegAndSize : SplitRegs) {
5487         // If the expression is already a fragment, the current register
5488         // offset+size might extend beyond the fragment. In this case, only
5489         // the register bits that are inside the fragment are relevant.
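        //
        // For illustration: with a 48-bit expression fragment and three
        // 32-bit registers, the first register covers bits [0, 32), the
        // second is clipped to the 16 bits at [32, 48), and the third would
        // start at offset 64 >= 48, so the loop stops before emitting it.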
5490         int RegFragmentSizeInBits = RegAndSize.second;
5491         if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5492           uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5493           // The register is entirely outside the expression fragment,
5494           // so is irrelevant for debug info.
5495           if (Offset >= ExprFragmentSizeInBits)
5496             break;
5497           // The register is partially outside the expression fragment, only
5498           // the low bits within the fragment are relevant for debug info.
5499           if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
5500             RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
5501           }
5502         }
5503 
5504         auto FragmentExpr = DIExpression::createFragmentExpression(
5505             Expr, Offset, RegFragmentSizeInBits);
5506         Offset += RegAndSize.second;
5507         // If a valid fragment expression cannot be created, the variable's
5508         // correct value cannot be determined, so it is set to Undef.
5509         if (!FragmentExpr) {
5510           SDDbgValue *SDV = DAG.getConstantDbgValue(
5511               Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
5512           DAG.AddDbgValue(SDV, nullptr, false);
5513           continue;
5514         }
5515         assert(!IsDbgDeclare && "DbgDeclare operand is not in memory?");
5516         FuncInfo.ArgDbgValues.push_back(
5517           BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
5518                   RegAndSize.first, Variable, *FragmentExpr));
5519       }
5520     };
5521 
5522     // Check if ValueMap has reg number.
5523     DenseMap<const Value *, Register>::const_iterator
5524       VMI = FuncInfo.ValueMap.find(V);
5525     if (VMI != FuncInfo.ValueMap.end()) {
5526       const auto &TLI = DAG.getTargetLoweringInfo();
5527       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5528                        V->getType(), getABIRegCopyCC(V));
5529       if (RFV.occupiesMultipleRegs()) {
5530         splitMultiRegDbgValue(RFV.getRegsAndSizes());
5531         return true;
5532       }
5533 
5534       Op = MachineOperand::CreateReg(VMI->second, false);
5535       IsIndirect = IsDbgDeclare;
5536     } else if (ArgRegsAndSizes.size() > 1) {
5537       // This was split due to the calling convention, and no virtual register
5538       // mapping exists for the value.
5539       splitMultiRegDbgValue(ArgRegsAndSizes);
5540       return true;
5541     }
5542   }
5543 
5544   if (!Op)
5545     return false;
5546 
5547   assert(Variable->isValidLocationForIntrinsic(DL) &&
5548          "Expected inlined-at fields to agree");
5549   IsIndirect = (Op->isReg()) ? IsIndirect : true;
5550   FuncInfo.ArgDbgValues.push_back(
5551       BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
5552               *Op, Variable, Expr));
5553 
5554   return true;
5555 }
5556 
5557 /// Return the appropriate SDDbgValue based on N.
5558 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5559                                              DILocalVariable *Variable,
5560                                              DIExpression *Expr,
5561                                              const DebugLoc &dl,
5562                                              unsigned DbgSDNodeOrder) {
5563   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5564     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5565     // stack slot locations.
5566     //
5567     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5568     // debug values here after optimization:
5569     //
5570     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
5571     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5572     //
5573     // Both describe the direct values of their associated variables.
5574     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5575                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5576   }
5577   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5578                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5579 }
5580 
5581 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5582   switch (Intrinsic) {
5583   case Intrinsic::smul_fix:
5584     return ISD::SMULFIX;
5585   case Intrinsic::umul_fix:
5586     return ISD::UMULFIX;
5587   case Intrinsic::smul_fix_sat:
5588     return ISD::SMULFIXSAT;
5589   case Intrinsic::umul_fix_sat:
5590     return ISD::UMULFIXSAT;
5591   case Intrinsic::sdiv_fix:
5592     return ISD::SDIVFIX;
5593   case Intrinsic::udiv_fix:
5594     return ISD::UDIVFIX;
5595   case Intrinsic::sdiv_fix_sat:
5596     return ISD::SDIVFIXSAT;
5597   case Intrinsic::udiv_fix_sat:
5598     return ISD::UDIVFIXSAT;
5599   default:
5600     llvm_unreachable("Unhandled fixed point intrinsic");
5601   }
5602 }
5603 
5604 void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
5605                                                     const char *FunctionName) {
5606   assert(FunctionName && "FunctionName must not be nullptr");
5607   SDValue Callee = DAG.getExternalSymbol(
5608       FunctionName,
5609       DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
5610   LowerCallTo(I, Callee, I.isTailCall());
5611 }
5612 
5613 /// Given a @llvm.call.preallocated.setup, return the corresponding
5614 /// preallocated call.
5615 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
5616   assert(cast<CallBase>(PreallocatedSetup)
5617                  ->getCalledFunction()
5618                  ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
5619          "expected call_preallocated_setup Value");
5620   for (auto *U : PreallocatedSetup->users()) {
5621     auto *UseCall = cast<CallBase>(U);
5622     const Function *Fn = UseCall->getCalledFunction();
5623     if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
5624       return UseCall;
5625     }
5626   }
5627   llvm_unreachable("expected corresponding call to preallocated setup/arg");
5628 }
5629 
5630 /// Lower the call to the specified intrinsic function.
5631 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
5632                                              unsigned Intrinsic) {
5633   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5634   SDLoc sdl = getCurSDLoc();
5635   DebugLoc dl = getCurDebugLoc();
5636   SDValue Res;
5637 
5638   switch (Intrinsic) {
5639   default:
5640     // By default, turn this into a target intrinsic node.
5641     visitTargetIntrinsic(I, Intrinsic);
5642     return;
5643   case Intrinsic::vscale: {
5644     match(&I, m_VScale(DAG.getDataLayout()));
5645     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5646     setValue(&I,
5647              DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1)));
5648     return;
5649   }
5650   case Intrinsic::vastart:  visitVAStart(I); return;
5651   case Intrinsic::vaend:    visitVAEnd(I); return;
5652   case Intrinsic::vacopy:   visitVACopy(I); return;
5653   case Intrinsic::returnaddress:
5654     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5655                              TLI.getPointerTy(DAG.getDataLayout()),
5656                              getValue(I.getArgOperand(0))));
5657     return;
5658   case Intrinsic::addressofreturnaddress:
5659     setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5660                              TLI.getPointerTy(DAG.getDataLayout())));
5661     return;
5662   case Intrinsic::sponentry:
5663     setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl,
5664                              TLI.getFrameIndexTy(DAG.getDataLayout())));
5665     return;
5666   case Intrinsic::frameaddress:
5667     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5668                              TLI.getFrameIndexTy(DAG.getDataLayout()),
5669                              getValue(I.getArgOperand(0))));
5670     return;
5671   case Intrinsic::read_register: {
5672     Value *Reg = I.getArgOperand(0);
5673     SDValue Chain = getRoot();
5674     SDValue RegName =
5675         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5676     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5677     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5678       DAG.getVTList(VT, MVT::Other), Chain, RegName);
5679     setValue(&I, Res);
5680     DAG.setRoot(Res.getValue(1));
5681     return;
5682   }
5683   case Intrinsic::write_register: {
5684     Value *Reg = I.getArgOperand(0);
5685     Value *RegValue = I.getArgOperand(1);
5686     SDValue Chain = getRoot();
5687     SDValue RegName =
5688         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5689     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
5690                             RegName, getValue(RegValue)));
5691     return;
5692   }
5693   case Intrinsic::memcpy: {
5694     const auto &MCI = cast<MemCpyInst>(I);
5695     SDValue Op1 = getValue(I.getArgOperand(0));
5696     SDValue Op2 = getValue(I.getArgOperand(1));
5697     SDValue Op3 = getValue(I.getArgOperand(2));
5698     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5699     Align DstAlign = MCI.getDestAlign().valueOrOne();
5700     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5701     Align Alignment = commonAlignment(DstAlign, SrcAlign);
5702     bool isVol = MCI.isVolatile();
5703     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5704     // FIXME: Support passing different dest/src alignments to the memcpy DAG
5705     // node.
5706     SDValue Root = isVol ? getRoot() : getMemoryRoot();
5707     SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
5708                                /* AlwaysInline */ false, isTC,
5709                                MachinePointerInfo(I.getArgOperand(0)),
5710                                MachinePointerInfo(I.getArgOperand(1)));
5711     updateDAGForMaybeTailCall(MC);
5712     return;
5713   }
5714   case Intrinsic::memcpy_inline: {
5715     const auto &MCI = cast<MemCpyInlineInst>(I);
5716     SDValue Dst = getValue(I.getArgOperand(0));
5717     SDValue Src = getValue(I.getArgOperand(1));
5718     SDValue Size = getValue(I.getArgOperand(2));
5719     assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
5720     // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
5721     Align DstAlign = MCI.getDestAlign().valueOrOne();
5722     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5723     Align Alignment = commonAlignment(DstAlign, SrcAlign);
5724     bool isVol = MCI.isVolatile();
5725     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5726     // FIXME: Support passing different dest/src alignments to the memcpy DAG
5727     // node.
5728     SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
5729                                /* AlwaysInline */ true, isTC,
5730                                MachinePointerInfo(I.getArgOperand(0)),
5731                                MachinePointerInfo(I.getArgOperand(1)));
5732     updateDAGForMaybeTailCall(MC);
5733     return;
5734   }
5735   case Intrinsic::memset: {
5736     const auto &MSI = cast<MemSetInst>(I);
5737     SDValue Op1 = getValue(I.getArgOperand(0));
5738     SDValue Op2 = getValue(I.getArgOperand(1));
5739     SDValue Op3 = getValue(I.getArgOperand(2));
5740     // @llvm.memset defines 0 and 1 to both mean no alignment.
5741     Align Alignment = MSI.getDestAlign().valueOrOne();
5742     bool isVol = MSI.isVolatile();
5743     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5744     SDValue Root = isVol ? getRoot() : getMemoryRoot();
5745     SDValue MS = DAG.getMemset(Root, sdl, Op1, Op2, Op3, Alignment, isVol, isTC,
5746                                MachinePointerInfo(I.getArgOperand(0)));
5747     updateDAGForMaybeTailCall(MS);
5748     return;
5749   }
5750   case Intrinsic::memmove: {
5751     const auto &MMI = cast<MemMoveInst>(I);
5752     SDValue Op1 = getValue(I.getArgOperand(0));
5753     SDValue Op2 = getValue(I.getArgOperand(1));
5754     SDValue Op3 = getValue(I.getArgOperand(2));
5755     // @llvm.memmove defines 0 and 1 to both mean no alignment.
5756     Align DstAlign = MMI.getDestAlign().valueOrOne();
5757     Align SrcAlign = MMI.getSourceAlign().valueOrOne();
5758     Align Alignment = commonAlignment(DstAlign, SrcAlign);
5759     bool isVol = MMI.isVolatile();
5760     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5761     // FIXME: Support passing different dest/src alignments to the memmove DAG
5762     // node.
5763     SDValue Root = isVol ? getRoot() : getMemoryRoot();
5764     SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
5765                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
5766                                 MachinePointerInfo(I.getArgOperand(1)));
5767     updateDAGForMaybeTailCall(MM);
5768     return;
5769   }
5770   case Intrinsic::memcpy_element_unordered_atomic: {
5771     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
5772     SDValue Dst = getValue(MI.getRawDest());
5773     SDValue Src = getValue(MI.getRawSource());
5774     SDValue Length = getValue(MI.getLength());
5775 
5776     unsigned DstAlign = MI.getDestAlignment();
5777     unsigned SrcAlign = MI.getSourceAlignment();
5778     Type *LengthTy = MI.getLength()->getType();
5779     unsigned ElemSz = MI.getElementSizeInBytes();
5780     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5781     SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
5782                                      SrcAlign, Length, LengthTy, ElemSz, isTC,
5783                                      MachinePointerInfo(MI.getRawDest()),
5784                                      MachinePointerInfo(MI.getRawSource()));
5785     updateDAGForMaybeTailCall(MC);
5786     return;
5787   }
5788   case Intrinsic::memmove_element_unordered_atomic: {
5789     auto &MI = cast<AtomicMemMoveInst>(I);
5790     SDValue Dst = getValue(MI.getRawDest());
5791     SDValue Src = getValue(MI.getRawSource());
5792     SDValue Length = getValue(MI.getLength());
5793 
5794     unsigned DstAlign = MI.getDestAlignment();
5795     unsigned SrcAlign = MI.getSourceAlignment();
5796     Type *LengthTy = MI.getLength()->getType();
5797     unsigned ElemSz = MI.getElementSizeInBytes();
5798     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5799     SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
5800                                       SrcAlign, Length, LengthTy, ElemSz, isTC,
5801                                       MachinePointerInfo(MI.getRawDest()),
5802                                       MachinePointerInfo(MI.getRawSource()));
5803     updateDAGForMaybeTailCall(MC);
5804     return;
5805   }
5806   case Intrinsic::memset_element_unordered_atomic: {
5807     auto &MI = cast<AtomicMemSetInst>(I);
5808     SDValue Dst = getValue(MI.getRawDest());
5809     SDValue Val = getValue(MI.getValue());
5810     SDValue Length = getValue(MI.getLength());
5811 
5812     unsigned DstAlign = MI.getDestAlignment();
5813     Type *LengthTy = MI.getLength()->getType();
5814     unsigned ElemSz = MI.getElementSizeInBytes();
5815     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5816     SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
5817                                      LengthTy, ElemSz, isTC,
5818                                      MachinePointerInfo(MI.getRawDest()));
5819     updateDAGForMaybeTailCall(MC);
5820     return;
5821   }
5822   case Intrinsic::call_preallocated_setup: {
5823     const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
5824     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
5825     SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
5826                               getRoot(), SrcValue);
5827     setValue(&I, Res);
5828     DAG.setRoot(Res);
5829     return;
5830   }
5831   case Intrinsic::call_preallocated_arg: {
5832     const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
5833     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
5834     SDValue Ops[3];
5835     Ops[0] = getRoot();
5836     Ops[1] = SrcValue;
5837     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
5838                                    MVT::i32); // arg index
5839     SDValue Res = DAG.getNode(
5840         ISD::PREALLOCATED_ARG, sdl,
5841         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
5842     setValue(&I, Res);
5843     DAG.setRoot(Res.getValue(1));
5844     return;
5845   }
5846   case Intrinsic::dbg_addr:
5847   case Intrinsic::dbg_declare: {
5848     const auto &DI = cast<DbgVariableIntrinsic>(I);
5849     DILocalVariable *Variable = DI.getVariable();
5850     DIExpression *Expression = DI.getExpression();
5851     dropDanglingDebugInfo(Variable, Expression);
5852     assert(Variable && "Missing variable");
5853     LLVM_DEBUG(dbgs() << "SelectionDAG visiting debug intrinsic: " << DI
5854                       << "\n");
5855     // Check if the address is missing, undef, or otherwise unusable.
5856     const Value *Address = DI.getVariableLocation();
5857     if (!Address || isa<UndefValue>(Address) ||
5858         (Address->use_empty() && !isa<Argument>(Address))) {
5859       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
5860                         << " (bad/undef/unused-arg address)\n");
5861       return;
5862     }
5863 
5864     bool isParameter = Variable->isParameter() || isa<Argument>(Address);
5865 
5866     // Check if this variable can be described by a frame index, typically
5867     // either as a static alloca or a byval parameter.
5868     int FI = std::numeric_limits<int>::max();
5869     if (const auto *AI =
5870             dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
5871       if (AI->isStaticAlloca()) {
5872         auto I = FuncInfo.StaticAllocaMap.find(AI);
5873         if (I != FuncInfo.StaticAllocaMap.end())
5874           FI = I->second;
5875       }
5876     } else if (const auto *Arg = dyn_cast<Argument>(
5877                    Address->stripInBoundsConstantOffsets())) {
5878       FI = FuncInfo.getArgumentFrameIndex(Arg);
5879     }
5880 
5881     // llvm.dbg.addr is control dependent and always generates indirect
5882     // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
5883     // the MachineFunction variable table.
5884     if (FI != std::numeric_limits<int>::max()) {
5885       if (Intrinsic == Intrinsic::dbg_addr) {
5886         SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
5887             Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
5888         DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
5889       } else {
5890         LLVM_DEBUG(dbgs() << "Skipping " << DI
5891                           << " (variable info stashed in MF side table)\n");
5892       }
5893       return;
5894     }
5895 
5896     SDValue &N = NodeMap[Address];
5897     if (!N.getNode() && isa<Argument>(Address))
5898       // Check unused arguments map.
5899       N = UnusedArgNodeMap[Address];
5900     SDDbgValue *SDV;
5901     if (N.getNode()) {
5902       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
5903         Address = BCI->getOperand(0);
5904       // Parameters are handled specially.
5905       auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
5906       if (isParameter && FINode) {
5907         // Byval parameter. We have a frame index at this point.
5908         SDV =
5909             DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
5910                                       /*IsIndirect*/ true, dl, SDNodeOrder);
5911       } else if (isa<Argument>(Address)) {
5912         // Address is an argument, so try to emit its dbg value using
5913         // virtual register info from the FuncInfo.ValueMap.
5914         EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
5915         return;
5916       } else {
5917         SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
5918                               true, dl, SDNodeOrder);
5919       }
5920       DAG.AddDbgValue(SDV, N.getNode(), isParameter);
5921     } else {
5922       // If Address is an argument then try to emit its dbg value using
5923       // virtual register info from the FuncInfo.ValueMap.
5924       if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
5925                                     N)) {
5926         LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
5927                           << " (could not emit func-arg dbg_value)\n");
5928       }
5929     }
5930     return;
5931   }
5932   case Intrinsic::dbg_label: {
5933     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
5934     DILabel *Label = DI.getLabel();
5935     assert(Label && "Missing label");
5936 
5937     SDDbgLabel *SDV;
5938     SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
5939     DAG.AddDbgLabel(SDV);
5940     return;
5941   }
5942   case Intrinsic::dbg_value: {
5943     const DbgValueInst &DI = cast<DbgValueInst>(I);
5944     assert(DI.getVariable() && "Missing variable");
5945 
5946     DILocalVariable *Variable = DI.getVariable();
5947     DIExpression *Expression = DI.getExpression();
5948     dropDanglingDebugInfo(Variable, Expression);
5949     const Value *V = DI.getValue();
5950     if (!V)
5951       return;
5952 
5953     if (handleDebugValue(V, Variable, Expression, dl, DI.getDebugLoc(),
5954         SDNodeOrder))
5955       return;
5956 
5957     // TODO: Dangling debug info will eventually either be resolved or produce
5958     // an Undef DBG_VALUE. However in the resolution case, a gap may appear
5959     // between the original dbg.value location and its resolved DBG_VALUE, which
5960     // we should ideally fill with an extra Undef DBG_VALUE.
5961 
5962     DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
5963     return;
5964   }
5965 
5966   case Intrinsic::eh_typeid_for: {
5967     // Find the type id for the given typeinfo.
5968     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5969     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5970     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5971     setValue(&I, Res);
5972     return;
5973   }
5974 
5975   case Intrinsic::eh_return_i32:
5976   case Intrinsic::eh_return_i64:
5977     DAG.getMachineFunction().setCallsEHReturn(true);
5978     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5979                             MVT::Other,
5980                             getControlRoot(),
5981                             getValue(I.getArgOperand(0)),
5982                             getValue(I.getArgOperand(1))));
5983     return;
5984   case Intrinsic::eh_unwind_init:
5985     DAG.getMachineFunction().setCallsUnwindInit(true);
5986     return;
5987   case Intrinsic::eh_dwarf_cfa:
5988     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
5989                              TLI.getPointerTy(DAG.getDataLayout()),
5990                              getValue(I.getArgOperand(0))));
5991     return;
5992   case Intrinsic::eh_sjlj_callsite: {
5993     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5994     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
5995     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
5996     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
5997 
5998     MMI.setCurrentCallSite(CI->getZExtValue());
5999     return;
6000   }
6001   case Intrinsic::eh_sjlj_functioncontext: {
6002     // Get and store the index of the function context.
6003     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6004     AllocaInst *FnCtx =
6005       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6006     int FI = FuncInfo.StaticAllocaMap[FnCtx];
6007     MFI.setFunctionContextIndex(FI);
6008     return;
6009   }
6010   case Intrinsic::eh_sjlj_setjmp: {
6011     SDValue Ops[2];
6012     Ops[0] = getRoot();
6013     Ops[1] = getValue(I.getArgOperand(0));
6014     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6015                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
6016     setValue(&I, Op.getValue(0));
6017     DAG.setRoot(Op.getValue(1));
6018     return;
6019   }
6020   case Intrinsic::eh_sjlj_longjmp:
6021     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6022                             getRoot(), getValue(I.getArgOperand(0))));
6023     return;
6024   case Intrinsic::eh_sjlj_setup_dispatch:
6025     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6026                             getRoot()));
6027     return;
6028   case Intrinsic::masked_gather:
6029     visitMaskedGather(I);
6030     return;
6031   case Intrinsic::masked_load:
6032     visitMaskedLoad(I);
6033     return;
6034   case Intrinsic::masked_scatter:
6035     visitMaskedScatter(I);
6036     return;
6037   case Intrinsic::masked_store:
6038     visitMaskedStore(I);
6039     return;
6040   case Intrinsic::masked_expandload:
6041     visitMaskedLoad(I, true /* IsExpanding */);
6042     return;
6043   case Intrinsic::masked_compressstore:
6044     visitMaskedStore(I, true /* IsCompressing */);
6045     return;
6046   case Intrinsic::powi:
6047     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6048                             getValue(I.getArgOperand(1)), DAG));
6049     return;
6050   case Intrinsic::log:
6051     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6052     return;
6053   case Intrinsic::log2:
6054     setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6055     return;
6056   case Intrinsic::log10:
6057     setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6058     return;
6059   case Intrinsic::exp:
6060     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6061     return;
6062   case Intrinsic::exp2:
6063     setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6064     return;
6065   case Intrinsic::pow:
6066     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6067                            getValue(I.getArgOperand(1)), DAG, TLI));
6068     return;
6069   case Intrinsic::sqrt:
6070   case Intrinsic::fabs:
6071   case Intrinsic::sin:
6072   case Intrinsic::cos:
6073   case Intrinsic::floor:
6074   case Intrinsic::ceil:
6075   case Intrinsic::trunc:
6076   case Intrinsic::rint:
6077   case Intrinsic::nearbyint:
6078   case Intrinsic::round:
6079   case Intrinsic::roundeven:
6080   case Intrinsic::canonicalize: {
6081     unsigned Opcode;
6082     switch (Intrinsic) {
6083     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6084     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
6085     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
6086     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
6087     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
6088     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
6089     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
6090     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
6091     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
6092     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6093     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
6094     case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6095     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6096     }
6097 
6098     setValue(&I, DAG.getNode(Opcode, sdl,
6099                              getValue(I.getArgOperand(0)).getValueType(),
6100                              getValue(I.getArgOperand(0))));
6101     return;
6102   }
6103   case Intrinsic::lround:
6104   case Intrinsic::llround:
6105   case Intrinsic::lrint:
6106   case Intrinsic::llrint: {
6107     unsigned Opcode;
6108     switch (Intrinsic) {
6109     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6110     case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
6111     case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6112     case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
6113     case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
6114     }
6115 
6116     EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6117     setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6118                              getValue(I.getArgOperand(0))));
6119     return;
6120   }
6121   case Intrinsic::minnum:
6122     setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6123                              getValue(I.getArgOperand(0)).getValueType(),
6124                              getValue(I.getArgOperand(0)),
6125                              getValue(I.getArgOperand(1))));
6126     return;
6127   case Intrinsic::maxnum:
6128     setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6129                              getValue(I.getArgOperand(0)).getValueType(),
6130                              getValue(I.getArgOperand(0)),
6131                              getValue(I.getArgOperand(1))));
6132     return;
6133   case Intrinsic::minimum:
6134     setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6135                              getValue(I.getArgOperand(0)).getValueType(),
6136                              getValue(I.getArgOperand(0)),
6137                              getValue(I.getArgOperand(1))));
6138     return;
6139   case Intrinsic::maximum:
6140     setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6141                              getValue(I.getArgOperand(0)).getValueType(),
6142                              getValue(I.getArgOperand(0)),
6143                              getValue(I.getArgOperand(1))));
6144     return;
6145   case Intrinsic::copysign:
6146     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6147                              getValue(I.getArgOperand(0)).getValueType(),
6148                              getValue(I.getArgOperand(0)),
6149                              getValue(I.getArgOperand(1))));
6150     return;
6151   case Intrinsic::fma:
6152     setValue(&I, DAG.getNode(ISD::FMA, sdl,
6153                              getValue(I.getArgOperand(0)).getValueType(),
6154                              getValue(I.getArgOperand(0)),
6155                              getValue(I.getArgOperand(1)),
6156                              getValue(I.getArgOperand(2))));
6157     return;
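  // Including ConstrainedOps.def with INSTRUCTION defined as below expands to
  // one case label per llvm.experimental.constrained.* intrinsic, so all of
  // them funnel into visitConstrainedFPIntrinsic.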
6158 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
6159   case Intrinsic::INTRINSIC:
6160 #include "llvm/IR/ConstrainedOps.def"
6161     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6162     return;
6163   case Intrinsic::fmuladd: {
6164     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6165     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6166         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6167       setValue(&I, DAG.getNode(ISD::FMA, sdl,
6168                                getValue(I.getArgOperand(0)).getValueType(),
6169                                getValue(I.getArgOperand(0)),
6170                                getValue(I.getArgOperand(1)),
6171                                getValue(I.getArgOperand(2))));
6172     } else {
6173       // TODO: Intrinsic calls should have fast-math-flags.
6174       SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
6175                                 getValue(I.getArgOperand(0)).getValueType(),
6176                                 getValue(I.getArgOperand(0)),
6177                                 getValue(I.getArgOperand(1)));
6178       SDValue Add = DAG.getNode(ISD::FADD, sdl,
6179                                 getValue(I.getArgOperand(0)).getValueType(),
6180                                 Mul,
6181                                 getValue(I.getArgOperand(2)));
6182       setValue(&I, Add);
6183     }
6184     return;
6185   }
6186   case Intrinsic::convert_to_fp16:
6187     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6188                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6189                                          getValue(I.getArgOperand(0)),
6190                                          DAG.getTargetConstant(0, sdl,
6191                                                                MVT::i32))));
6192     return;
6193   case Intrinsic::convert_from_fp16:
6194     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6195                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6196                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
6197                                          getValue(I.getArgOperand(0)))));
6198     return;
6199   case Intrinsic::pcmarker: {
6200     SDValue Tmp = getValue(I.getArgOperand(0));
6201     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
6202     return;
6203   }
6204   case Intrinsic::readcyclecounter: {
6205     SDValue Op = getRoot();
6206     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6207                       DAG.getVTList(MVT::i64, MVT::Other), Op);
6208     setValue(&I, Res);
6209     DAG.setRoot(Res.getValue(1));
6210     return;
6211   }
6212   case Intrinsic::bitreverse:
6213     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6214                              getValue(I.getArgOperand(0)).getValueType(),
6215                              getValue(I.getArgOperand(0))));
6216     return;
6217   case Intrinsic::bswap:
6218     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6219                              getValue(I.getArgOperand(0)).getValueType(),
6220                              getValue(I.getArgOperand(0))));
6221     return;
6222   case Intrinsic::cttz: {
6223     SDValue Arg = getValue(I.getArgOperand(0));
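    // The second operand of llvm.cttz is an i1 immediate: when it is true, a
    // zero input gives an undefined result, which maps to CTTZ_ZERO_UNDEF.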
6224     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6225     EVT Ty = Arg.getValueType();
6226     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6227                              sdl, Ty, Arg));
6228     return;
6229   }
6230   case Intrinsic::ctlz: {
6231     SDValue Arg = getValue(I.getArgOperand(0));
6232     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6233     EVT Ty = Arg.getValueType();
6234     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6235                              sdl, Ty, Arg));
6236     return;
6237   }
6238   case Intrinsic::ctpop: {
6239     SDValue Arg = getValue(I.getArgOperand(0));
6240     EVT Ty = Arg.getValueType();
6241     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6242     return;
6243   }
6244   case Intrinsic::fshl:
6245   case Intrinsic::fshr: {
6246     bool IsFSHL = Intrinsic == Intrinsic::fshl;
6247     SDValue X = getValue(I.getArgOperand(0));
6248     SDValue Y = getValue(I.getArgOperand(1));
6249     SDValue Z = getValue(I.getArgOperand(2));
6250     EVT VT = X.getValueType();
6251     SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT);
6252     SDValue Zero = DAG.getConstant(0, sdl, VT);
6253     SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC);
6254 
6255     auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6256     if (TLI.isOperationLegalOrCustom(FunnelOpcode, VT)) {
6257       setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6258       return;
6259     }
6260 
6261     // When X == Y, this is a rotate. If the data type has a power-of-2 size,
6262     // we avoid the select that is necessary in the general case to filter out
6263     // the 0-shift possibility that leads to UB.
6264     if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
6265       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6266       if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
6267         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6268         return;
6269       }
6270 
6271       // Some targets only rotate one way. Try the opposite direction.
6272       RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
6273       if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
6274         // Negate the shift amount because it is safe to ignore the high bits.
6275         SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
6276         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));
6277         return;
6278       }
6279 
6280       // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW))
6281       // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW))
6282       SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
6283       SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC);
6284       SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt);
6285       SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? NShAmt : ShAmt);
6286       setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY));
6287       return;
6288     }
6289 
6290     // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
6291     // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
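    //
    // Worked example: fshl i8 X, Y, 3 takes the high 8 bits of the 16-bit
    // concatenation X:Y shifted left by 3, i.e. (X << 3) | (Y >> 5); fshr
    // takes the low 8 bits of X:Y shifted right by 3, i.e.
    // (X << 5) | (Y >> 3).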
6292     SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt);
6293     SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt);
6294     SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt);
6295     SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY);
6296 
6297     // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth,
6298     // and that is undefined. We must compare and select to avoid UB.
6299     EVT CCVT = MVT::i1;
6300     if (VT.isVector())
6301       CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements());
6302 
6303     // For fshl, 0-shift returns the 1st arg (X).
6304     // For fshr, 0-shift returns the 2nd arg (Y).
6305     SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ);
6306     setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or));
6307     return;
6308   }
6309   case Intrinsic::sadd_sat: {
6310     SDValue Op1 = getValue(I.getArgOperand(0));
6311     SDValue Op2 = getValue(I.getArgOperand(1));
6312     setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6313     return;
6314   }
6315   case Intrinsic::uadd_sat: {
6316     SDValue Op1 = getValue(I.getArgOperand(0));
6317     SDValue Op2 = getValue(I.getArgOperand(1));
6318     setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6319     return;
6320   }
6321   case Intrinsic::ssub_sat: {
6322     SDValue Op1 = getValue(I.getArgOperand(0));
6323     SDValue Op2 = getValue(I.getArgOperand(1));
6324     setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6325     return;
6326   }
6327   case Intrinsic::usub_sat: {
6328     SDValue Op1 = getValue(I.getArgOperand(0));
6329     SDValue Op2 = getValue(I.getArgOperand(1));
6330     setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6331     return;
6332   }
6333   case Intrinsic::smul_fix:
6334   case Intrinsic::umul_fix:
6335   case Intrinsic::smul_fix_sat:
6336   case Intrinsic::umul_fix_sat: {
6337     SDValue Op1 = getValue(I.getArgOperand(0));
6338     SDValue Op2 = getValue(I.getArgOperand(1));
6339     SDValue Op3 = getValue(I.getArgOperand(2));
6340     setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6341                              Op1.getValueType(), Op1, Op2, Op3));
6342     return;
6343   }
6344   case Intrinsic::sdiv_fix:
6345   case Intrinsic::udiv_fix:
6346   case Intrinsic::sdiv_fix_sat:
6347   case Intrinsic::udiv_fix_sat: {
6348     SDValue Op1 = getValue(I.getArgOperand(0));
6349     SDValue Op2 = getValue(I.getArgOperand(1));
6350     SDValue Op3 = getValue(I.getArgOperand(2));
6351     setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6352                               Op1, Op2, Op3, DAG, TLI));
6353     return;
6354   }
6355   case Intrinsic::stacksave: {
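         // Save the current stack pointer so that a later llvm.stackrestore
         // can release dynamic allocas made in between.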
6356     SDValue Op = getRoot();
6357     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6358     Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
6359     setValue(&I, Res);
6360     DAG.setRoot(Res.getValue(1));
6361     return;
6362   }
6363   case Intrinsic::stackrestore:
6364     Res = getValue(I.getArgOperand(0));
6365     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6366     return;
6367   case Intrinsic::get_dynamic_area_offset: {
6368     SDValue Op = getRoot();
6369     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6370     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6371     // The result type of @llvm.get.dynamic.area.offset must not be wider
6372     // than the target's pointer type.
6373     if (PtrTy.getSizeInBits() < ResTy.getSizeInBits())
6374       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6375                          " intrinsic!");
6376     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6377                       Op);
6378     DAG.setRoot(Op);
6379     setValue(&I, Res);
6380     return;
6381   }
6382   case Intrinsic::stackguard: {
6383     MachineFunction &MF = DAG.getMachineFunction();
6384     const Module &M = *MF.getFunction().getParent();
6385     SDValue Chain = getRoot();
6386     if (TLI.useLoadStackGuardNode()) {
6387       Res = getLoadStackGuard(DAG, sdl, Chain);
6388     } else {
6389       EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6390       const Value *Global = TLI.getSDagStackGuard(M);
6391       unsigned Align = DL->getPrefTypeAlignment(Global->getType());
6392       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6393                         MachinePointerInfo(Global, 0), Align,
6394                         MachineMemOperand::MOVolatile);
6395     }
6396     if (TLI.useStackGuardXorFP())
6397       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6398     DAG.setRoot(Chain);
6399     setValue(&I, Res);
6400     return;
6401   }
6402   case Intrinsic::stackprotector: {
6403     // Emit code into the DAG to store the stack guard onto the stack.
6404     MachineFunction &MF = DAG.getMachineFunction();
6405     MachineFrameInfo &MFI = MF.getFrameInfo();
6406     SDValue Src, Chain = getRoot();
6407 
6408     if (TLI.useLoadStackGuardNode())
6409       Src = getLoadStackGuard(DAG, sdl, Chain);
6410     else
6411       Src = getValue(I.getArgOperand(0));   // The guard's value.
6412 
6413     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
6414 
6415     int FI = FuncInfo.StaticAllocaMap[Slot];
6416     MFI.setStackProtectorIndex(FI);
6417     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6418 
6419     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
6420 
6421     // Store the stack protector onto the stack.
6422     Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
6423                                                  DAG.getMachineFunction(), FI),
6424                        /* Alignment = */ 0, MachineMemOperand::MOVolatile);
6425     setValue(&I, Res);
6426     DAG.setRoot(Res);
6427     return;
6428   }
6429   case Intrinsic::objectsize:
6430     llvm_unreachable("llvm.objectsize.* should have been lowered already");
6431 
6432   case Intrinsic::is_constant:
6433     llvm_unreachable("llvm.is.constant.* should have been lowered already");
6434 
6435   case Intrinsic::annotation:
6436   case Intrinsic::ptr_annotation:
6437   case Intrinsic::launder_invariant_group:
6438   case Intrinsic::strip_invariant_group:
6439     // Drop the intrinsic, but forward the value.
6440     setValue(&I, getValue(I.getOperand(0)));
6441     return;
6442   case Intrinsic::assume:
6443   case Intrinsic::var_annotation:
6444   case Intrinsic::sideeffect:
6445     // Discard annotate attributes, assumptions, and artificial side-effects.
6446     return;
6447 
6448   case Intrinsic::codeview_annotation: {
6449     // Emit a label associated with this metadata.
6450     MachineFunction &MF = DAG.getMachineFunction();
6451     MCSymbol *Label =
6452         MF.getMMI().getContext().createTempSymbol("annotation", true);
6453     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
6454     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
6455     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
6456     DAG.setRoot(Res);
6457     return;
6458   }
6459 
6460   case Intrinsic::init_trampoline: {
6461     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
6462 
6463     SDValue Ops[6];
6464     Ops[0] = getRoot();
6465     Ops[1] = getValue(I.getArgOperand(0));
6466     Ops[2] = getValue(I.getArgOperand(1));
6467     Ops[3] = getValue(I.getArgOperand(2));
6468     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
6469     Ops[5] = DAG.getSrcValue(F);
6470 
6471     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
6472 
6473     DAG.setRoot(Res);
6474     return;
6475   }
6476   case Intrinsic::adjust_trampoline:
6477     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
6478                              TLI.getPointerTy(DAG.getDataLayout()),
6479                              getValue(I.getArgOperand(0))));
6480     return;
6481   case Intrinsic::gcroot: {
6482     assert(DAG.getMachineFunction().getFunction().hasGC() &&
6483            "only valid in functions with gc specified, enforced by Verifier");
6484     assert(GFI && "implied by previous");
6485     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
6486     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
6487 
6488     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
6489     GFI->addStackRoot(FI->getIndex(), TypeMap);
6490     return;
6491   }
6492   case Intrinsic::gcread:
6493   case Intrinsic::gcwrite:
6494     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
6495   case Intrinsic::flt_rounds:
6496     Res = DAG.getNode(ISD::FLT_ROUNDS_, sdl, {MVT::i32, MVT::Other}, getRoot());
6497     setValue(&I, Res);
6498     DAG.setRoot(Res.getValue(1));
6499     return;
6500 
6501   case Intrinsic::expect:
6502     // Just replace __builtin_expect(exp, c) with exp.
6503     setValue(&I, getValue(I.getArgOperand(0)));
6504     return;
6505 
6506   case Intrinsic::debugtrap:
6507   case Intrinsic::trap: {
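         // If the call site carries a "trap-func-name" attribute, lower to a
         // call to that named function instead of a trap instruction.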
6508     StringRef TrapFuncName =
6509         I.getAttributes()
6510             .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
6511             .getValueAsString();
6512     if (TrapFuncName.empty()) {
6513       ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
6514         ISD::TRAP : ISD::DEBUGTRAP;
6515       DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
6516       return;
6517     }
6518     TargetLowering::ArgListTy Args;
6519 
6520     TargetLowering::CallLoweringInfo CLI(DAG);
6521     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
6522         CallingConv::C, I.getType(),
6523         DAG.getExternalSymbol(TrapFuncName.data(),
6524                               TLI.getPointerTy(DAG.getDataLayout())),
6525         std::move(Args));
6526 
6527     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
6528     DAG.setRoot(Result.second);
6529     return;
6530   }
6531 
6532   case Intrinsic::uadd_with_overflow:
6533   case Intrinsic::sadd_with_overflow:
6534   case Intrinsic::usub_with_overflow:
6535   case Intrinsic::ssub_with_overflow:
6536   case Intrinsic::umul_with_overflow:
6537   case Intrinsic::smul_with_overflow: {
6538     ISD::NodeType Op;
6539     switch (Intrinsic) {
6540     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6541     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
6542     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
6543     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
6544     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
6545     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
6546     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
6547     }
6548     SDValue Op1 = getValue(I.getArgOperand(0));
6549     SDValue Op2 = getValue(I.getArgOperand(1));
6550 
6551     EVT ResultVT = Op1.getValueType();
6552     EVT OverflowVT = MVT::i1;
6553     if (ResultVT.isVector())
6554       OverflowVT = EVT::getVectorVT(
6555           *Context, OverflowVT, ResultVT.getVectorNumElements());
6556 
6557     SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
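         // E.g. @llvm.uadd.with.overflow.i32 becomes a UADDO node producing
         // an i32 sum and an i1 (or vector-of-i1) overflow flag.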
6558     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
6559     return;
6560   }
6561   case Intrinsic::prefetch: {
6562     SDValue Ops[5];
6563     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
6564     auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
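         // Operand layout: chain, address, rw, locality and cache-type,
         // mirroring the @llvm.prefetch signature.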
6565     Ops[0] = DAG.getRoot();
6566     Ops[1] = getValue(I.getArgOperand(0));
6567     Ops[2] = getValue(I.getArgOperand(1));
6568     Ops[3] = getValue(I.getArgOperand(2));
6569     Ops[4] = getValue(I.getArgOperand(3));
6570     SDValue Result = DAG.getMemIntrinsicNode(
6571         ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
6572         EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
6573         /* align */ None, Flags);
6574 
6575     // Chain the prefetch in parallel with any pending loads, to stay out of
6576     // the way of later optimizations.
6577     PendingLoads.push_back(Result);
6578     Result = getRoot();
6579     DAG.setRoot(Result);
6580     return;
6581   }
6582   case Intrinsic::lifetime_start:
6583   case Intrinsic::lifetime_end: {
6584     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
6585     // Stack coloring is not enabled at -O0, so discard region information.
6586     if (TM.getOptLevel() == CodeGenOpt::None)
6587       return;
6588 
6589     const int64_t ObjectSize =
6590         cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
6591     Value *const ObjectPtr = I.getArgOperand(1);
6592     SmallVector<const Value *, 4> Allocas;
6593     GetUnderlyingObjects(ObjectPtr, Allocas, *DL);
6594 
6595     for (const Value *Object : Allocas) {
6596       const AllocaInst *LifetimeObject =
6597           dyn_cast_or_null<AllocaInst>(Object);
6598 
6599       // Could not find an Alloca.
6600       if (!LifetimeObject)
6601         continue;
6602 
6603       // First check that the Alloca is static; otherwise it won't have a
6604       // valid frame index.
6605       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
6606       if (SI == FuncInfo.StaticAllocaMap.end())
6607         return;
6608 
6609       const int FrameIndex = SI->second;
6610       int64_t Offset;
6611       if (GetPointerBaseWithConstantOffset(
6612               ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
6613         Offset = -1; // Cannot determine offset from alloca to lifetime object.
6614       Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
6615                                 Offset);
6616       DAG.setRoot(Res);
6617     }
6618     return;
6619   }
6620   case Intrinsic::invariant_start:
6621     // Discard region information.
6622     setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
6623     return;
6624   case Intrinsic::invariant_end:
6625     // Discard region information.
6626     return;
6627   case Intrinsic::clear_cache:
6628     // FunctionName may be null.
6629     if (const char *FunctionName = TLI.getClearCacheBuiltinName())
6630       lowerCallToExternalSymbol(I, FunctionName);
6631     return;
6632   case Intrinsic::donothing:
6633     // ignore
6634     return;
6635   case Intrinsic::experimental_stackmap:
6636     visitStackmap(I);
6637     return;
6638   case Intrinsic::experimental_patchpoint_void:
6639   case Intrinsic::experimental_patchpoint_i64:
6640     visitPatchpoint(I);
6641     return;
6642   case Intrinsic::experimental_gc_statepoint:
6643     LowerStatepoint(cast<GCStatepointInst>(I));
6644     return;
6645   case Intrinsic::experimental_gc_result:
6646     visitGCResult(cast<GCResultInst>(I));
6647     return;
6648   case Intrinsic::experimental_gc_relocate:
6649     visitGCRelocate(cast<GCRelocateInst>(I));
6650     return;
6651   case Intrinsic::instrprof_increment:
6652     llvm_unreachable("instrprof failed to lower an increment");
6653   case Intrinsic::instrprof_value_profile:
6654     llvm_unreachable("instrprof failed to lower a value profiling call");
6655   case Intrinsic::localescape: {
6656     MachineFunction &MF = DAG.getMachineFunction();
6657     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6658 
6659     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
6660     // is the same on all targets.
6661     for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
6662       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
6663       if (isa<ConstantPointerNull>(Arg))
6664         continue; // Skip null pointers. They represent a hole in index space.
6665       AllocaInst *Slot = cast<AllocaInst>(Arg);
6666       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
6667              "can only escape static allocas");
6668       int FI = FuncInfo.StaticAllocaMap[Slot];
6669       MCSymbol *FrameAllocSym =
6670           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6671               GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
6672       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
6673               TII->get(TargetOpcode::LOCAL_ESCAPE))
6674           .addSym(FrameAllocSym)
6675           .addFrameIndex(FI);
6676     }
6677 
6678     return;
6679   }
6680 
6681   case Intrinsic::localrecover: {
6682     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
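         // This pairs with llvm.localescape in %fn: the frame-allocation
         // symbol created there resolves to the frame offset of the escaped
         // alloca, which is added to %fp below.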
6683     MachineFunction &MF = DAG.getMachineFunction();
6684 
6685     // Get the symbol that defines the frame offset.
6686     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
6687     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
6688     unsigned IdxVal =
6689         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
6690     MCSymbol *FrameAllocSym =
6691         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6692             GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
6693 
6694     Value *FP = I.getArgOperand(1);
6695     SDValue FPVal = getValue(FP);
6696     EVT PtrVT = FPVal.getValueType();
6697 
6698     // Create an MCSymbol for the label to avoid any target lowering
6699     // that would make this PC-relative.
6700     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
6701     SDValue OffsetVal =
6702         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
6703 
6704     // Add the offset to the FP.
6705     SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
6706     setValue(&I, Add);
6707 
6708     return;
6709   }
6710 
6711   case Intrinsic::eh_exceptionpointer:
6712   case Intrinsic::eh_exceptioncode: {
6713     // Get the exception pointer vreg, copy from it, and resize it to fit.
6714     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
6715     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
6716     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
6717     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
6718     SDValue N =
6719         DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
6720     if (Intrinsic == Intrinsic::eh_exceptioncode)
6721       N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
6722     setValue(&I, N);
6723     return;
6724   }
6725   case Intrinsic::xray_customevent: {
6726     // Here we want to make sure that the intrinsic behaves as if it has a
6727     // specific calling convention; currently this is done only for x86_64 Linux.
6728     // FIXME: Support other platforms later.
6729     const auto &Triple = DAG.getTarget().getTargetTriple();
6730     if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6731       return;
6732 
6733     SDLoc DL = getCurSDLoc();
6734     SmallVector<SDValue, 8> Ops;
6735 
6736     // We want to say that we always want the arguments in registers.
6737     SDValue LogEntryVal = getValue(I.getArgOperand(0));
6738     SDValue StrSizeVal = getValue(I.getArgOperand(1));
6739     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6740     SDValue Chain = getRoot();
6741     Ops.push_back(LogEntryVal);
6742     Ops.push_back(StrSizeVal);
6743     Ops.push_back(Chain);
6744 
6745     // We need to enforce the calling convention for the call site so that
6746     // argument ordering is handled correctly and register allocation can
6747     // see which registers must be assumed clobbered and preserved across
6748     // calls to the intrinsic.
6749     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
6750                                            DL, NodeTys, Ops);
6751     SDValue patchableNode = SDValue(MN, 0);
6752     DAG.setRoot(patchableNode);
6753     setValue(&I, patchableNode);
6754     return;
6755   }
6756   case Intrinsic::xray_typedevent: {
6757     // Here we want to make sure that the intrinsic behaves as if it has a
6758     // specific calling convention; currently this is done only for x86_64 Linux.
6759     // FIXME: Support other platforms later.
6760     const auto &Triple = DAG.getTarget().getTargetTriple();
6761     if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6762       return;
6763 
6764     SDLoc DL = getCurSDLoc();
6765     SmallVector<SDValue, 8> Ops;
6766 
6767     // We want to say that we always want the arguments in registers.
6768     // It's unclear to me how manipulating the selection DAG here forces callers
6769     // to provide arguments in registers instead of on the stack.
6770     SDValue LogTypeId = getValue(I.getArgOperand(0));
6771     SDValue LogEntryVal = getValue(I.getArgOperand(1));
6772     SDValue StrSizeVal = getValue(I.getArgOperand(2));
6773     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6774     SDValue Chain = getRoot();
6775     Ops.push_back(LogTypeId);
6776     Ops.push_back(LogEntryVal);
6777     Ops.push_back(StrSizeVal);
6778     Ops.push_back(Chain);
6779 
6780     // We need to enforce the calling convention for the call site so that
6781     // argument ordering is handled correctly and register allocation can
6782     // see which registers must be assumed clobbered and preserved across
6783     // calls to the intrinsic.
6784     MachineSDNode *MN = DAG.getMachineNode(
6785         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
6786     SDValue patchableNode = SDValue(MN, 0);
6787     DAG.setRoot(patchableNode);
6788     setValue(&I, patchableNode);
6789     return;
6790   }
6791   case Intrinsic::experimental_deoptimize:
6792     LowerDeoptimizeCall(&I);
6793     return;
6794 
6795   case Intrinsic::experimental_vector_reduce_v2_fadd:
6796   case Intrinsic::experimental_vector_reduce_v2_fmul:
6797   case Intrinsic::experimental_vector_reduce_add:
6798   case Intrinsic::experimental_vector_reduce_mul:
6799   case Intrinsic::experimental_vector_reduce_and:
6800   case Intrinsic::experimental_vector_reduce_or:
6801   case Intrinsic::experimental_vector_reduce_xor:
6802   case Intrinsic::experimental_vector_reduce_smax:
6803   case Intrinsic::experimental_vector_reduce_smin:
6804   case Intrinsic::experimental_vector_reduce_umax:
6805   case Intrinsic::experimental_vector_reduce_umin:
6806   case Intrinsic::experimental_vector_reduce_fmax:
6807   case Intrinsic::experimental_vector_reduce_fmin:
6808     visitVectorReduce(I, Intrinsic);
6809     return;
6810 
6811   case Intrinsic::icall_branch_funnel: {
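         // Operands: the pointer being dispatched on, the common GlobalValue
         // base, (offset, target) pairs sorted by offset, then the chain.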
6812     SmallVector<SDValue, 16> Ops;
6813     Ops.push_back(getValue(I.getArgOperand(0)));
6814 
6815     int64_t Offset;
6816     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6817         I.getArgOperand(1), Offset, DAG.getDataLayout()));
6818     if (!Base)
6819       report_fatal_error(
6820           "llvm.icall.branch.funnel operand must be a GlobalValue");
6821     Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0));
6822 
6823     struct BranchFunnelTarget {
6824       int64_t Offset;
6825       SDValue Target;
6826     };
6827     SmallVector<BranchFunnelTarget, 8> Targets;
6828 
6829     for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) {
6830       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6831           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
6832       if (ElemBase != Base)
6833         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
6834                            "to the same GlobalValue");
6835 
6836       SDValue Val = getValue(I.getArgOperand(Op + 1));
6837       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
6838       if (!GA)
6839         report_fatal_error(
6840             "llvm.icall.branch.funnel operand must be a GlobalValue");
6841       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
6842                                      GA->getGlobal(), getCurSDLoc(),
6843                                      Val.getValueType(), GA->getOffset())});
6844     }
6845     llvm::sort(Targets,
6846                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
6847                  return T1.Offset < T2.Offset;
6848                });
6849 
6850     for (auto &T : Targets) {
6851       Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
6852       Ops.push_back(T.Target);
6853     }
6854 
6855     Ops.push_back(DAG.getRoot()); // Chain
6856     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL,
6857                                  getCurSDLoc(), MVT::Other, Ops),
6858               0);
6859     DAG.setRoot(N);
6860     setValue(&I, N);
6861     HasTailCall = true;
6862     return;
6863   }
6864 
6865   case Intrinsic::wasm_landingpad_index:
6866     // The information this intrinsic contained has been transferred to
6867     // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
6868     // delete it now.
6869     return;
6870 
6871   case Intrinsic::aarch64_settag:
6872   case Intrinsic::aarch64_settag_zero: {
6873     const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6874     bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
6875     SDValue Val = TSI.EmitTargetCodeForSetTag(
6876         DAG, getCurSDLoc(), getRoot(), getValue(I.getArgOperand(0)),
6877         getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
6878         ZeroMemory);
6879     DAG.setRoot(Val);
6880     setValue(&I, Val);
6881     return;
6882   }
6883   case Intrinsic::ptrmask: {
6884     SDValue Ptr = getValue(I.getOperand(0));
6885     SDValue Const = getValue(I.getOperand(1));
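         // llvm.ptrmask ANDs the pointer's integral value with the mask;
         // e.g. a mask of -4 clears the low two bits of the pointer.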
6886 
6887     EVT PtrVT = Ptr.getValueType();
6888     setValue(&I, DAG.getNode(ISD::AND, getCurSDLoc(), PtrVT, Ptr,
6889                              DAG.getZExtOrTrunc(Const, getCurSDLoc(), PtrVT)));
6890     return;
6891   }
6892   }
6893 }
6894 
6895 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
6896     const ConstrainedFPIntrinsic &FPI) {
6897   SDLoc sdl = getCurSDLoc();
6898 
6899   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6900   SmallVector<EVT, 4> ValueVTs;
6901   ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs);
6902   ValueVTs.push_back(MVT::Other); // Out chain
6903 
6904   // We do not need to serialize constrained FP intrinsics against
6905   // each other or against (nonvolatile) loads, so they can be
6906   // chained like loads.
6907   SDValue Chain = DAG.getRoot();
6908   SmallVector<SDValue, 4> Opers;
6909   Opers.push_back(Chain);
6910   if (FPI.isUnaryOp()) {
6911     Opers.push_back(getValue(FPI.getArgOperand(0)));
6912   } else if (FPI.isTernaryOp()) {
6913     Opers.push_back(getValue(FPI.getArgOperand(0)));
6914     Opers.push_back(getValue(FPI.getArgOperand(1)));
6915     Opers.push_back(getValue(FPI.getArgOperand(2)));
6916   } else {
6917     Opers.push_back(getValue(FPI.getArgOperand(0)));
6918     Opers.push_back(getValue(FPI.getArgOperand(1)));
6919   }
6920 
6921   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
6922     assert(Result.getNode()->getNumValues() == 2);
6923 
6924     // Push node to the appropriate list so that future instructions can be
6925     // chained up correctly.
6926     SDValue OutChain = Result.getValue(1);
6927     switch (EB) {
6928     case fp::ExceptionBehavior::ebIgnore:
6929       // The only reason why ebIgnore nodes still need to be chained is that
6930       // they might depend on the current rounding mode, and therefore must
6931       // not be moved across instructions that may change that mode.
6932       LLVM_FALLTHROUGH;
6933     case fp::ExceptionBehavior::ebMayTrap:
6934       // These must not be moved across calls or instructions that may change
6935       // floating-point exception masks.
6936       PendingConstrainedFP.push_back(OutChain);
6937       break;
6938     case fp::ExceptionBehavior::ebStrict:
6939       // These must not be moved across calls or instructions that may change
6940       // floating-point exception masks or read floating-point exception flags.
6941       // In addition, they cannot be optimized out even if unused.
6942       PendingConstrainedFPStrict.push_back(OutChain);
6943       break;
6944     }
6945   };
6946 
6947   SDVTList VTs = DAG.getVTList(ValueVTs);
6948   fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
6949 
6950   SDNodeFlags Flags;
6951   if (EB == fp::ExceptionBehavior::ebIgnore)
6952     Flags.setNoFPExcept(true);
6953 
6954   if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
6955     Flags.copyFMF(*FPOp);
6956 
6957   unsigned Opcode;
6958   switch (FPI.getIntrinsicID()) {
6959   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6960 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
6961   case Intrinsic::INTRINSIC:                                                   \
6962     Opcode = ISD::STRICT_##DAGN;                                               \
6963     break;
6964 #include "llvm/IR/ConstrainedOps.def"
6965   case Intrinsic::experimental_constrained_fmuladd: {
6966     Opcode = ISD::STRICT_FMA;
6967     // Break fmuladd into fmul and fadd.
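         // When fusion is not allowed, or the target has no fast FMA, lower
         // fmuladd(a, b, c) as fadd(fmul(a, b), c), chaining the two strict
         // nodes through their out-chains.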
6968     if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
6969         !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(),
6970                                         ValueVTs[0])) {
6971       Opers.pop_back();
6972       SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
6973       pushOutChain(Mul, EB);
6974       Opcode = ISD::STRICT_FADD;
6975       Opers.clear();
6976       Opers.push_back(Mul.getValue(1));
6977       Opers.push_back(Mul.getValue(0));
6978       Opers.push_back(getValue(FPI.getArgOperand(2)));
6979     }
6980     break;
6981   }
6982   }
6983 
6984   // A few strict DAG nodes carry additional operands that are not
6985   // set up by the default code above.
6986   switch (Opcode) {
6987   default: break;
6988   case ISD::STRICT_FP_ROUND:
6989     Opers.push_back(
6990         DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
6991     break;
6992   case ISD::STRICT_FSETCC:
6993   case ISD::STRICT_FSETCCS: {
6994     auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
6995     Opers.push_back(DAG.getCondCode(getFCmpCondCode(FPCmp->getPredicate())));
6996     break;
6997   }
6998   }
6999 
7000   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
7001   pushOutChain(Result, EB);
7002 
7003   SDValue FPResult = Result.getValue(0);
7004   setValue(&FPI, FPResult);
7005 }
7006 
7007 std::pair<SDValue, SDValue>
7008 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
7009                                     const BasicBlock *EHPadBB) {
7010   MachineFunction &MF = DAG.getMachineFunction();
7011   MachineModuleInfo &MMI = MF.getMMI();
7012   MCSymbol *BeginLabel = nullptr;
7013 
7014   if (EHPadBB) {
7015     // Insert a label before the invoke call to mark the try range.  This can be
7016     // used to detect deletion of the invoke via the MachineModuleInfo.
7017     BeginLabel = MMI.getContext().createTempSymbol();
7018 
7019     // For SjLj, keep track of which landing pads go with which invokes
7020     // so as to maintain the ordering of pads in the LSDA.
7021     unsigned CallSiteIndex = MMI.getCurrentCallSite();
7022     if (CallSiteIndex) {
7023       MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
7024       LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
7025 
7026       // Now that the call site is handled, stop tracking it.
7027       MMI.setCurrentCallSite(0);
7028     }
7029 
7030     // Both PendingLoads and PendingExports must be flushed here;
7031     // this call might not return.
7032     (void)getRoot();
7033     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
7034 
7035     CLI.setChain(getRoot());
7036   }
7037   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7038   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7039 
7040   assert((CLI.IsTailCall || Result.second.getNode()) &&
7041          "Non-null chain expected with non-tail call!");
7042   assert((Result.second.getNode() || !Result.first.getNode()) &&
7043          "Null value expected with tail call!");
7044 
7045   if (!Result.second.getNode()) {
7046     // As a special case, a null chain means that a tail call has been emitted
7047     // and the DAG root is already updated.
7048     HasTailCall = true;
7049 
7050     // Since there's no actual continuation from this block, nothing can be
7051     // relying on us setting vregs for them.
7052     PendingExports.clear();
7053   } else {
7054     DAG.setRoot(Result.second);
7055   }
7056 
7057   if (EHPadBB) {
7058     // Insert a label at the end of the invoke call to mark the try range.  This
7059     // can be used to detect deletion of the invoke via the MachineModuleInfo.
7060     MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
7061     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
7062 
7063     // Inform MachineModuleInfo of range.
7064     auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
7065     // Some platforms (e.g. wasm) use funclet-style IR but do not actually
7066     // use outlined funclets or their LSDA info style.
7067     if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
7068       assert(CLI.CB);
7069       WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
7070       EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CB), BeginLabel, EndLabel);
7071     } else if (!isScopedEHPersonality(Pers)) {
7072       MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
7073     }
7074   }
7075 
7076   return Result;
7077 }
7078 
7079 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
7080                                       bool isTailCall,
7081                                       const BasicBlock *EHPadBB) {
7082   auto &DL = DAG.getDataLayout();
7083   FunctionType *FTy = CB.getFunctionType();
7084   Type *RetTy = CB.getType();
7085 
7086   TargetLowering::ArgListTy Args;
7087   Args.reserve(CB.arg_size());
7088 
7089   const Value *SwiftErrorVal = nullptr;
7090   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7091 
7092   if (isTailCall) {
7093     // Avoid emitting tail calls in functions with the disable-tail-calls
7094     // attribute.
7095     auto *Caller = CB.getParent()->getParent();
7096     if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
7097         "true")
7098       isTailCall = false;
7099 
7100     // We can't tail call inside a function with a swifterror argument. Lowering
7101     // does not support this yet; the value would have to be moved into the
7102     // swifterror register before the call.
7103     if (TLI.supportSwiftError() &&
7104         Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
7105       isTailCall = false;
7106   }
7107 
7108   for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
7109     TargetLowering::ArgListEntry Entry;
7110     const Value *V = *I;
7111 
7112     // Skip empty types
7113     if (V->getType()->isEmptyTy())
7114       continue;
7115 
7116     SDValue ArgNode = getValue(V);
7117     Entry.Node = ArgNode; Entry.Ty = V->getType();
7118 
7119     Entry.setAttributes(&CB, I - CB.arg_begin());
7120 
7121     // Use swifterror virtual register as input to the call.
7122     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
7123       SwiftErrorVal = V;
7124       // We find the virtual register for the actual swifterror argument.
7125       // Instead of using the Value, we use the virtual register instead.
7126       Entry.Node =
7127           DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
7128                           EVT(TLI.getPointerTy(DL)));
7129     }
7130 
7131     Args.push_back(Entry);
7132 
7133     // If we have an explicit sret argument that is an Instruction (i.e., it
7134     // might point to function-local memory), we can't meaningfully tail-call.
7135     if (Entry.IsSRet && isa<Instruction>(V))
7136       isTailCall = false;
7137   }
7138 
7139   // If call site has a cfguardtarget operand bundle, create and add an
7140   // additional ArgListEntry.
7141   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
7142     TargetLowering::ArgListEntry Entry;
7143     Value *V = Bundle->Inputs[0];
7144     SDValue ArgNode = getValue(V);
7145     Entry.Node = ArgNode;
7146     Entry.Ty = V->getType();
7147     Entry.IsCFGuardTarget = true;
7148     Args.push_back(Entry);
7149   }
7150 
7151   // Check if target-independent constraints permit a tail call here.
7152   // Target-dependent constraints are checked within TLI->LowerCallTo.
7153   if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
7154     isTailCall = false;
7155 
7156   // Disable tail calls if there is a swifterror argument. Targets have not
7157   // been updated to support tail calls with swifterror yet.
7158   if (TLI.supportSwiftError() && SwiftErrorVal)
7159     isTailCall = false;
7160 
7161   TargetLowering::CallLoweringInfo CLI(DAG);
7162   CLI.setDebugLoc(getCurSDLoc())
7163       .setChain(getRoot())
7164       .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
7165       .setTailCall(isTailCall)
7166       .setConvergent(CB.isConvergent())
7167       .setIsPreallocated(
7168           CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
7169   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
7170 
7171   if (Result.first.getNode()) {
7172     Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
7173     setValue(&CB, Result.first);
7174   }
7175 
7176   // The last element of CLI.InVals has the SDValue for swifterror return.
7177   // Here we copy it to a virtual register and update SwiftErrorMap for
7178   // book-keeping.
7179   if (SwiftErrorVal && TLI.supportSwiftError()) {
7180     // Get the last element of InVals.
7181     SDValue Src = CLI.InVals.back();
7182     Register VReg =
7183         SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
7184     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
7185     DAG.setRoot(CopyNode);
7186   }
7187 }
7188 
7189 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
7190                              SelectionDAGBuilder &Builder) {
7191   // Check to see if this load can be trivially constant folded, e.g. if the
7192   // input is from a string literal.
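       // For example, a 4-byte load from a constant global such as c"abcd"
       // folds to the corresponding i32 constant, so no runtime load is
       // emitted.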
7193   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
7194     // Cast pointer to the type we really want to load.
7195     Type *LoadTy =
7196         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
7197     if (LoadVT.isVector())
7198       LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements());
7199 
7200     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
7201                                          PointerType::getUnqual(LoadTy));
7202 
7203     if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
7204             const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
7205       return Builder.getValue(LoadCst);
7206   }
7207 
7208   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
7209   // still constant memory, the input chain can be the entry node.
7210   SDValue Root;
7211   bool ConstantMemory = false;
7212 
7213   // Do not serialize (non-volatile) loads of constant memory with anything.
7214   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
7215     Root = Builder.DAG.getEntryNode();
7216     ConstantMemory = true;
7217   } else {
7218     // Do not serialize non-volatile loads against each other.
7219     Root = Builder.DAG.getRoot();
7220   }
7221 
7222   SDValue Ptr = Builder.getValue(PtrVal);
7223   SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
7224                                         Ptr, MachinePointerInfo(PtrVal),
7225                                         /* Alignment = */ 1);
7226 
7227   if (!ConstantMemory)
7228     Builder.PendingLoads.push_back(LoadVal.getValue(1));
7229   return LoadVal;
7230 }
7231 
7232 /// Record the value for an instruction that produces an integer result,
7233 /// converting the type where necessary.
7234 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
7235                                                   SDValue Value,
7236                                                   bool IsSigned) {
7237   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7238                                                     I.getType(), true);
7239   if (IsSigned)
7240     Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
7241   else
7242     Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
7243   setValue(&I, Value);
7244 }
7245 
7246 /// See if we can lower a memcmp call into an optimized form. If so, return
7247 /// true and lower it. Otherwise return false, and it will be lowered like a
7248 /// normal call.
7249 /// The caller already checked that \p I calls the appropriate LibFunc with a
7250 /// correct prototype.
7251 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
7252   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
7253   const Value *Size = I.getArgOperand(2);
7254   const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
7255   if (CSize && CSize->getZExtValue() == 0) {
7256     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7257                                                           I.getType(), true);
7258     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
7259     return true;
7260   }
7261 
7262   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7263   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
7264       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
7265       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
7266   if (Res.first.getNode()) {
7267     processIntegerCallValue(I, Res.first, true);
7268     PendingLoads.push_back(Res.second);
7269     return true;
7270   }
7271 
7272   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
7273   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
7274   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
7275     return false;
7276 
7277   // If the target has a fast compare for the given size, it will return a
7278   // preferred load type for that size. Require that the load VT is legal and
7279   // that the target supports unaligned loads of that type. Otherwise, return
7280   // INVALID.
7281   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
7282     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7283     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
7284     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
7285       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
7286       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
7287       // TODO: Check alignment of src and dest ptrs.
7288       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
7289       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
7290       if (!TLI.isTypeLegal(LVT) ||
7291           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
7292           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
7293         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
7294     }
7295 
7296     return LVT;
7297   };
7298 
7299   // This turns into unaligned loads. We only do this if the target natively
7300   // supports the MVT we'll be loading or if it is small enough (<= 4) that
7301   // we'll only produce a small number of byte loads.
7302   MVT LoadVT;
7303   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
7304   switch (NumBitsToCompare) {
7305   default:
7306     return false;
7307   case 16:
7308     LoadVT = MVT::i16;
7309     break;
7310   case 32:
7311     LoadVT = MVT::i32;
7312     break;
7313   case 64:
7314   case 128:
7315   case 256:
7316     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
7317     break;
7318   }
7319 
7320   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
7321     return false;
7322 
7323   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
7324   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
7325 
7326   // Bitcast to a wide integer type if the loads are vectors.
7327   if (LoadVT.isVector()) {
7328     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
7329     LoadL = DAG.getBitcast(CmpVT, LoadL);
7330     LoadR = DAG.getBitcast(CmpVT, LoadR);
7331   }
7332 
7333   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
7334   processIntegerCallValue(I, Cmp, false);
7335   return true;
7336 }
7337 
7338 /// See if we can lower a memchr call into an optimized form. If so, return
7339 /// true and lower it. Otherwise return false, and it will be lowered like a
7340 /// normal call.
7341 /// The caller already checked that \p I calls the appropriate LibFunc with a
7342 /// correct prototype.
7343 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
7344   const Value *Src = I.getArgOperand(0);
7345   const Value *Char = I.getArgOperand(1);
7346   const Value *Length = I.getArgOperand(2);
7347 
7348   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7349   std::pair<SDValue, SDValue> Res =
7350     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
7351                                 getValue(Src), getValue(Char), getValue(Length),
7352                                 MachinePointerInfo(Src));
7353   if (Res.first.getNode()) {
7354     setValue(&I, Res.first);
7355     PendingLoads.push_back(Res.second);
7356     return true;
7357   }
7358 
7359   return false;
7360 }
7361 
7362 /// See if we can lower a mempcpy call into an optimized form. If so, return
7363 /// true and lower it. Otherwise return false, and it will be lowered like a
7364 /// normal call.
7365 /// The caller already checked that \p I calls the appropriate LibFunc with a
7366 /// correct prototype.
7367 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
7368   SDValue Dst = getValue(I.getArgOperand(0));
7369   SDValue Src = getValue(I.getArgOperand(1));
7370   SDValue Size = getValue(I.getArgOperand(2));
7371 
7372   Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
7373   Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
7374   // DAG::getMemcpy needs Alignment to be defined.
7375   Align Alignment = std::min(DstAlign, SrcAlign);
7376 
7377   bool isVol = false;
7378   SDLoc sdl = getCurSDLoc();
7379 
7380   // In the mempcpy context we need to pass in a false value for isTailCall
7381   // because the return pointer needs to be adjusted by the size of
7382   // the copied memory.
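       // mempcpy(dst, src, n) behaves like memcpy but returns dst + n rather
       // than dst, so after emitting the memcpy we compute that adjusted
       // pointer.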
7383   SDValue Root = isVol ? getRoot() : getMemoryRoot();
7384   SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, isVol, false,
7385                              /*isTailCall=*/false,
7386                              MachinePointerInfo(I.getArgOperand(0)),
7387                              MachinePointerInfo(I.getArgOperand(1)));
7388   assert(MC.getNode() != nullptr &&
7389          "** memcpy should not be lowered as TailCall in mempcpy context **");
7390   DAG.setRoot(MC);
7391 
7392   // Check if Size needs to be truncated or extended.
7393   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
7394 
7395   // Adjust return pointer to point just past the last dst byte.
7396   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
7397                                     Dst, Size);
7398   setValue(&I, DstPlusSize);
7399   return true;
7400 }
7401 
7402 /// See if we can lower a strcpy call into an optimized form.  If so, return
7403 /// true and lower it, otherwise return false and it will be lowered like a
7404 /// normal call.
7405 /// The caller already checked that \p I calls the appropriate LibFunc with a
7406 /// correct prototype.
7407 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
7408   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7409 
7410   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7411   std::pair<SDValue, SDValue> Res =
7412     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
7413                                 getValue(Arg0), getValue(Arg1),
7414                                 MachinePointerInfo(Arg0),
7415                                 MachinePointerInfo(Arg1), isStpcpy);
7416   if (Res.first.getNode()) {
7417     setValue(&I, Res.first);
7418     DAG.setRoot(Res.second);
7419     return true;
7420   }
7421 
7422   return false;
7423 }
7424 
7425 /// See if we can lower a strcmp call into an optimized form.  If so, return
7426 /// true and lower it, otherwise return false and it will be lowered like a
7427 /// normal call.
7428 /// The caller already checked that \p I calls the appropriate LibFunc with a
7429 /// correct prototype.
7430 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
7431   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7432 
7433   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7434   std::pair<SDValue, SDValue> Res =
7435     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
7436                                 getValue(Arg0), getValue(Arg1),
7437                                 MachinePointerInfo(Arg0),
7438                                 MachinePointerInfo(Arg1));
7439   if (Res.first.getNode()) {
7440     processIntegerCallValue(I, Res.first, true);
7441     PendingLoads.push_back(Res.second);
7442     return true;
7443   }
7444 
7445   return false;
7446 }
7447 
7448 /// See if we can lower a strlen call into an optimized form.  If so, return
7449 /// true and lower it, otherwise return false and it will be lowered like a
7450 /// normal call.
7451 /// The caller already checked that \p I calls the appropriate LibFunc with a
7452 /// correct prototype.
7453 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
7454   const Value *Arg0 = I.getArgOperand(0);
7455 
7456   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7457   std::pair<SDValue, SDValue> Res =
7458     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
7459                                 getValue(Arg0), MachinePointerInfo(Arg0));
7460   if (Res.first.getNode()) {
7461     processIntegerCallValue(I, Res.first, false);
7462     PendingLoads.push_back(Res.second);
7463     return true;
7464   }
7465 
7466   return false;
7467 }
7468 
7469 /// See if we can lower a strnlen call into an optimized form.  If so, return
7470 /// true and lower it, otherwise return false and it will be lowered like a
7471 /// normal call.
7472 /// The caller already checked that \p I calls the appropriate LibFunc with a
7473 /// correct prototype.
7474 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
7475   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7476 
7477   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7478   std::pair<SDValue, SDValue> Res =
7479     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
7480                                  getValue(Arg0), getValue(Arg1),
7481                                  MachinePointerInfo(Arg0));
7482   if (Res.first.getNode()) {
7483     processIntegerCallValue(I, Res.first, false);
7484     PendingLoads.push_back(Res.second);
7485     return true;
7486   }
7487 
7488   return false;
7489 }
7490 
7491 /// See if we can lower a unary floating-point operation into an SDNode with
7492 /// the specified Opcode.  If so, return true and lower it, otherwise return
7493 /// false and it will be lowered like a normal call.
7494 /// The caller already checked that \p I calls the appropriate LibFunc with a
7495 /// correct prototype.
7496 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
7497                                               unsigned Opcode) {
7498   // We already checked this call's prototype; verify it doesn't modify errno.
7499   if (!I.onlyReadsMemory())
7500     return false;
7501 
7502   SDValue Tmp = getValue(I.getArgOperand(0));
7503   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
7504   return true;
7505 }
7506 
7507 /// See if we can lower a binary floating-point operation into an SDNode with
7508 /// the specified Opcode. If so, return true and lower it. Otherwise return
7509 /// false, and it will be lowered like a normal call.
7510 /// The caller already checked that \p I calls the appropriate LibFunc with a
7511 /// correct prototype.
7512 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
7513                                                unsigned Opcode) {
7514   // We already checked this call's prototype; verify it doesn't modify errno.
7515   if (!I.onlyReadsMemory())
7516     return false;
7517 
7518   SDValue Tmp0 = getValue(I.getArgOperand(0));
7519   SDValue Tmp1 = getValue(I.getArgOperand(1));
7520   EVT VT = Tmp0.getValueType();
7521   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
7522   return true;
7523 }
7524 
7525 void SelectionDAGBuilder::visitCall(const CallInst &I) {
7526   // Handle inline assembly differently.
7527   if (I.isInlineAsm()) {
7528     visitInlineAsm(I);
7529     return;
7530   }
7531 
7532   if (Function *F = I.getCalledFunction()) {
7533     if (F->isDeclaration()) {
7534       // Is this an LLVM intrinsic or a target-specific intrinsic?
7535       unsigned IID = F->getIntrinsicID();
7536       if (!IID)
7537         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
7538           IID = II->getIntrinsicID(F);
7539 
7540       if (IID) {
7541         visitIntrinsicCall(I, IID);
7542         return;
7543       }
7544     }
7545 
7546     // Check for well-known libc/libm calls.  If the function is internal, it
7547     // can't be a library call.  Don't do the check if marked as nobuiltin for
7548     // some reason or the call site requires strict floating point semantics.
7549     LibFunc Func;
7550     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
7551         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
7552         LibInfo->hasOptimizedCodeGen(Func)) {
7553       switch (Func) {
7554       default: break;
7555       case LibFunc_copysign:
7556       case LibFunc_copysignf:
7557       case LibFunc_copysignl:
7558         // We already checked this call's prototype; verify it doesn't modify
7559         // errno.
7560         if (I.onlyReadsMemory()) {
7561           SDValue LHS = getValue(I.getArgOperand(0));
7562           SDValue RHS = getValue(I.getArgOperand(1));
7563           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
7564                                    LHS.getValueType(), LHS, RHS));
7565           return;
7566         }
7567         break;
7568       case LibFunc_fabs:
7569       case LibFunc_fabsf:
7570       case LibFunc_fabsl:
7571         if (visitUnaryFloatCall(I, ISD::FABS))
7572           return;
7573         break;
7574       case LibFunc_fmin:
7575       case LibFunc_fminf:
7576       case LibFunc_fminl:
7577         if (visitBinaryFloatCall(I, ISD::FMINNUM))
7578           return;
7579         break;
7580       case LibFunc_fmax:
7581       case LibFunc_fmaxf:
7582       case LibFunc_fmaxl:
7583         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
7584           return;
7585         break;
7586       case LibFunc_sin:
7587       case LibFunc_sinf:
7588       case LibFunc_sinl:
7589         if (visitUnaryFloatCall(I, ISD::FSIN))
7590           return;
7591         break;
7592       case LibFunc_cos:
7593       case LibFunc_cosf:
7594       case LibFunc_cosl:
7595         if (visitUnaryFloatCall(I, ISD::FCOS))
7596           return;
7597         break;
7598       case LibFunc_sqrt:
7599       case LibFunc_sqrtf:
7600       case LibFunc_sqrtl:
7601       case LibFunc_sqrt_finite:
7602       case LibFunc_sqrtf_finite:
7603       case LibFunc_sqrtl_finite:
7604         if (visitUnaryFloatCall(I, ISD::FSQRT))
7605           return;
7606         break;
7607       case LibFunc_floor:
7608       case LibFunc_floorf:
7609       case LibFunc_floorl:
7610         if (visitUnaryFloatCall(I, ISD::FFLOOR))
7611           return;
7612         break;
7613       case LibFunc_nearbyint:
7614       case LibFunc_nearbyintf:
7615       case LibFunc_nearbyintl:
7616         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
7617           return;
7618         break;
7619       case LibFunc_ceil:
7620       case LibFunc_ceilf:
7621       case LibFunc_ceill:
7622         if (visitUnaryFloatCall(I, ISD::FCEIL))
7623           return;
7624         break;
7625       case LibFunc_rint:
7626       case LibFunc_rintf:
7627       case LibFunc_rintl:
7628         if (visitUnaryFloatCall(I, ISD::FRINT))
7629           return;
7630         break;
7631       case LibFunc_round:
7632       case LibFunc_roundf:
7633       case LibFunc_roundl:
7634         if (visitUnaryFloatCall(I, ISD::FROUND))
7635           return;
7636         break;
7637       case LibFunc_trunc:
7638       case LibFunc_truncf:
7639       case LibFunc_truncl:
7640         if (visitUnaryFloatCall(I, ISD::FTRUNC))
7641           return;
7642         break;
7643       case LibFunc_log2:
7644       case LibFunc_log2f:
7645       case LibFunc_log2l:
7646         if (visitUnaryFloatCall(I, ISD::FLOG2))
7647           return;
7648         break;
7649       case LibFunc_exp2:
7650       case LibFunc_exp2f:
7651       case LibFunc_exp2l:
7652         if (visitUnaryFloatCall(I, ISD::FEXP2))
7653           return;
7654         break;
7655       case LibFunc_memcmp:
7656         if (visitMemCmpCall(I))
7657           return;
7658         break;
7659       case LibFunc_mempcpy:
7660         if (visitMemPCpyCall(I))
7661           return;
7662         break;
7663       case LibFunc_memchr:
7664         if (visitMemChrCall(I))
7665           return;
7666         break;
7667       case LibFunc_strcpy:
7668         if (visitStrCpyCall(I, false))
7669           return;
7670         break;
7671       case LibFunc_stpcpy:
7672         if (visitStrCpyCall(I, true))
7673           return;
7674         break;
7675       case LibFunc_strcmp:
7676         if (visitStrCmpCall(I))
7677           return;
7678         break;
7679       case LibFunc_strlen:
7680         if (visitStrLenCall(I))
7681           return;
7682         break;
7683       case LibFunc_strnlen:
7684         if (visitStrNLenCall(I))
7685           return;
7686         break;
7687       }
7688     }
7689   }
7690 
7691   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
7692   // have to do anything here to lower funclet bundles.
7693   // CFGuardTarget bundles are lowered in LowerCallTo.
7694   assert(!I.hasOperandBundlesOtherThan(
7695              {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
7696               LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated}) &&
7697          "Cannot lower calls with arbitrary operand bundles!");
7698 
7699   SDValue Callee = getValue(I.getCalledOperand());
7700 
7701   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
7702     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
7703   else
7704     // Check if we can potentially perform a tail call. More detailed checking
7705     // is done within LowerCallTo, after more information about the call is
7706     // known.
7707     LowerCallTo(I, Callee, I.isTailCall());
7708 }
7709 
7710 namespace {
7711 
7712 /// AsmOperandInfo - This contains information for each constraint that we are
7713 /// lowering.
7714 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
7715 public:
7716   /// CallOperand - If this is the result output operand or a clobber
7717   /// this is null, otherwise it is the incoming operand to the CallInst.
7718   /// This gets modified as the asm is processed.
7719   SDValue CallOperand;
7720 
7721   /// AssignedRegs - If this is a register or register class operand, this
7722   /// contains the set of registers corresponding to the operand.
7723   RegsForValue AssignedRegs;
7724 
7725   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
7726     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
7727   }
7728 
7729   /// Whether or not this operand accesses memory.
7730   bool hasMemory(const TargetLowering &TLI) const {
7731     // Indirect operands access memory.
7732     if (isIndirect)
7733       return true;
7734 
7735     for (const auto &Code : Codes)
7736       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
7737         return true;
7738 
7739     return false;
7740   }
7741 
7742   /// getCallOperandValEVT - Return the EVT of the Value* that this operand
7743   /// corresponds to.  If there is no Value* for this operand, it returns
7744   /// MVT::Other.
7745   EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
7746                            const DataLayout &DL) const {
7747     if (!CallOperandVal) return MVT::Other;
7748 
7749     if (isa<BasicBlock>(CallOperandVal))
7750       return TLI.getProgramPointerTy(DL);
7751 
7752     llvm::Type *OpTy = CallOperandVal->getType();
7753 
7754     // FIXME: code duplicated from TargetLowering::ParseConstraints().
7755     // If this is an indirect operand, the operand is a pointer to the
7756     // accessed type.
7757     if (isIndirect) {
7758       PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
7759       if (!PtrTy)
7760         report_fatal_error("Indirect operand for inline asm not a pointer!");
7761       OpTy = PtrTy->getElementType();
7762     }
7763 
7764     // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
7765     if (StructType *STy = dyn_cast<StructType>(OpTy))
7766       if (STy->getNumElements() == 1)
7767         OpTy = STy->getElementType(0);
7768 
7769     // If OpTy is not a single value, it may be a struct/union that we
7770     // can tile with integers.
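    // For example, a struct of two i16 fields occupies 32 bits and is
    // treated here as a single i32.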
7771     if (!OpTy->isSingleValueType() && OpTy->isSized()) {
7772       unsigned BitSize = DL.getTypeSizeInBits(OpTy);
7773       switch (BitSize) {
7774       default: break;
7775       case 1:
7776       case 8:
7777       case 16:
7778       case 32:
7779       case 64:
7780       case 128:
7781         OpTy = IntegerType::get(Context, BitSize);
7782         break;
7783       }
7784     }
7785 
7786     return TLI.getValueType(DL, OpTy, true);
7787   }
7788 };
7789 
7790 using SDISelAsmOperandInfoVector = SmallVector<SDISelAsmOperandInfo, 16>;
7791 
7792 } // end anonymous namespace
7793 
7794 /// Make sure that the output operand \p OpInfo and its corresponding input
7795 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
7796 /// out).
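/// For example, in asm("..." : "=r"(Out) : "0"(In)), the tied input must fit
/// the same register class as output 0; on success the input's ConstraintVT
/// is rewritten to match the output's.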
7797 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
7798                                SDISelAsmOperandInfo &MatchingOpInfo,
7799                                SelectionDAG &DAG) {
7800   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
7801     return;
7802 
7803   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
7804   const auto &TLI = DAG.getTargetLoweringInfo();
7805 
7806   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
7807       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
7808                                        OpInfo.ConstraintVT);
7809   std::pair<unsigned, const TargetRegisterClass *> InputRC =
7810       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
7811                                        MatchingOpInfo.ConstraintVT);
7812   if ((OpInfo.ConstraintVT.isInteger() !=
7813        MatchingOpInfo.ConstraintVT.isInteger()) ||
7814       (MatchRC.second != InputRC.second)) {
7815     // FIXME: error out in a more elegant fashion
7816     report_fatal_error("Unsupported asm: input constraint"
7817                        " with a matching output constraint of"
7818                        " incompatible type!");
7819   }
7820   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
7821 }
7822 
7823 /// Get a direct memory input to behave well as an indirect operand.
7824 /// This may introduce stores, hence the need for a \p Chain.
7825 /// \return The (possibly updated) chain.
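/// For example, a ConstantFP fed to an "m" constraint gets a constant-pool
/// address, while a non-constant value is spilled to a fresh stack slot whose
/// address is used instead.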
7826 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
7827                                         SDISelAsmOperandInfo &OpInfo,
7828                                         SelectionDAG &DAG) {
7829   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7830 
7831   // If we don't have an indirect input, put it in the constant pool if we
7832   // can, otherwise spill it to a stack slot.
7833   // TODO: This isn't quite right. We need to handle these according to
7834   // the addressing mode that the constraint wants. Also, this may take
7835   // an additional register for the computation and we don't want that
7836   // either.
7837 
7838   // If the operand is a float, integer, or vector constant, spill to a
7839   // constant pool entry to get its address.
7840   const Value *OpVal = OpInfo.CallOperandVal;
7841   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
7842       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
7843     OpInfo.CallOperand = DAG.getConstantPool(
7844         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
7845     return Chain;
7846   }
7847 
7848   // Otherwise, create a stack slot and emit a store to it before the asm.
7849   Type *Ty = OpVal->getType();
7850   auto &DL = DAG.getDataLayout();
7851   uint64_t TySize = DL.getTypeAllocSize(Ty);
7852   unsigned Align = DL.getPrefTypeAlignment(Ty);
7853   MachineFunction &MF = DAG.getMachineFunction();
7854   int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
7855   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
7856   Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
7857                             MachinePointerInfo::getFixedStack(MF, SSFI),
7858                             TLI.getMemValueType(DL, Ty));
7859   OpInfo.CallOperand = StackSlot;
7860 
7861   return Chain;
7862 }
7863 
7864 /// GetRegistersForValue - Assign registers (virtual or physical) for the
7865 /// specified operand.  We prefer to assign virtual registers, to allow the
7866 /// register allocator to handle the assignment process.  However, if the asm
7867 /// uses features that we can't model on machineinstrs, we have SDISel do the
7868 /// allocation.  This produces generally horrible, but correct, code.
7869 ///
7870 ///   OpInfo describes the operand
7871 ///   RefOpInfo describes the matching operand if any, the operand otherwise
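///
///   For example, the user may ask for the AX register with an i32 operand;
///   AX's native type is i16, and recording that lets the correct extension
///   be applied later (see RegVT below).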
7872 static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
7873                                  SDISelAsmOperandInfo &OpInfo,
7874                                  SDISelAsmOperandInfo &RefOpInfo) {
7875   LLVMContext &Context = *DAG.getContext();
7876   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7877 
7878   MachineFunction &MF = DAG.getMachineFunction();
7879   SmallVector<unsigned, 4> Regs;
7880   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7881 
7882   // No work to do for memory operations.
7883   if (OpInfo.ConstraintType == TargetLowering::C_Memory)
7884     return;
7885 
7886   // If this is a constraint for a single physreg, or a constraint for a
7887   // register class, find it.
7888   unsigned AssignedReg;
7889   const TargetRegisterClass *RC;
7890   std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
7891       &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
7892   // RC is unset only on failure. Return immediately.
7893   if (!RC)
7894     return;
7895 
7896   // Get the actual register value type.  This is important, because the user
7897   // may have asked for (e.g.) the AX register in i32 type.  We need to
7898   // remember that AX is actually i16 to get the right extension.
7899   const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
7900 
7901   if (OpInfo.ConstraintVT != MVT::Other) {
7902     // If this is an FP operand in an integer register (or vice versa), or more
7903     // generally if the operand value disagrees with the register class we plan
7904     // to stick it in, fix the operand type.
7905     //
7906     // If this is an input value, the bitcast to the new type is done now.
7907     // Bitcast for output value is done at the end of visitInlineAsm().
7908     if ((OpInfo.Type == InlineAsm::isOutput ||
7909          OpInfo.Type == InlineAsm::isInput) &&
7910         !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
7911       // Try to convert to the first EVT that the reg class contains.  If the
7912       // types are identical size, use a bitcast to convert (e.g. two differing
7913       // vector types).  Note: output bitcast is done at the end of
7914       // visitInlineAsm().
7915       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
7916         // Exclude indirect inputs while they are unsupported because the code
7917         // to perform the load is missing and thus OpInfo.CallOperand still
7918         // refers to the input address rather than the pointed-to value.
7919         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
7920           OpInfo.CallOperand =
7921               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
7922         OpInfo.ConstraintVT = RegVT;
7923         // If the operand is an FP value and we want it in integer registers,
7924         // use the corresponding integer type. This turns an f64 value into
7925         // i64, which can be passed with two i32 values on a 32-bit machine.
7926       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
7927         MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
7928         if (OpInfo.Type == InlineAsm::isInput)
7929           OpInfo.CallOperand =
7930               DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
7931         OpInfo.ConstraintVT = VT;
7932       }
7933     }
7934   }
7935 
7936   // No need to allocate a matching input constraint since the constraint it's
7937   // matching to has already been allocated.
7938   if (OpInfo.isMatchingInputConstraint())
7939     return;
7940 
7941   EVT ValueVT = OpInfo.ConstraintVT;
7942   if (OpInfo.ConstraintVT == MVT::Other)
7943     ValueVT = RegVT;
7944 
7945   // Initialize NumRegs.
7946   unsigned NumRegs = 1;
7947   if (OpInfo.ConstraintVT != MVT::Other)
7948     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
7949 
7950   // If this is a constraint for a specific physical register, like {r17},
7951   // assign it now.
7952 
7953   // If this is associated with a specific register, initialize the iterator
7954   // to the correct place. If virtual, make sure we have enough registers.
7955 
7956   // Initialize the iterator if necessary.
7957   TargetRegisterClass::iterator I = RC->begin();
7958   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7959 
7960   // If a specific physreg was requested, advance the iterator to it.
7961   if (AssignedReg) {
7962     for (; *I != AssignedReg; ++I)
7963       assert(I != RC->end() && "AssignedReg should be member of RC");
7964   }
7965 
7966   for (; NumRegs; --NumRegs, ++I) {
7967     assert(I != RC->end() && "Ran out of registers to allocate!");
7968     Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
7969     Regs.push_back(R);
7970   }
7971 
7972   OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
7973 }
7974 
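/// Return the index of the flag-word operand describing inline asm operand
/// \p OperandNo. Each already-emitted operand group is one flag word followed
/// by InlineAsm::getNumOperandRegisters() register operands.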
7975 static unsigned
7976 findMatchingInlineAsmOperand(unsigned OperandNo,
7977                              const std::vector<SDValue> &AsmNodeOperands) {
7978   // Scan until we find the definition we already emitted of this operand.
7979   unsigned CurOp = InlineAsm::Op_FirstOperand;
7980   for (; OperandNo; --OperandNo) {
7981     // Advance to the next operand.
7982     unsigned OpFlag =
7983         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7984     assert((InlineAsm::isRegDefKind(OpFlag) ||
7985             InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
7986             InlineAsm::isMemKind(OpFlag)) &&
7987            "Skipped past definitions?");
7988     CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
7989   }
7990   return CurOp;
7991 }
7992 
7993 namespace {
7994 
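/// Accumulates the "extra info" flags (HasSideEffects, IsAlignStack, the asm
/// dialect, MayLoad/MayStore) that become operand 3 of the INLINEASM node.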
7995 class ExtraFlags {
7996   unsigned Flags = 0;
7997 
7998 public:
7999   explicit ExtraFlags(const CallBase &Call) {
8000     const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
8001     if (IA->hasSideEffects())
8002       Flags |= InlineAsm::Extra_HasSideEffects;
8003     if (IA->isAlignStack())
8004       Flags |= InlineAsm::Extra_IsAlignStack;
8005     if (Call.isConvergent())
8006       Flags |= InlineAsm::Extra_IsConvergent;
8007     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
8008   }
8009 
8010   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
8011     // Ideally, we would only check against memory constraints.  However, the
8012     // meaning of an Other constraint can be target-specific and we can't easily
8013     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
8014     // for Other constraints as well.
8015     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
8016         OpInfo.ConstraintType == TargetLowering::C_Other) {
8017       if (OpInfo.Type == InlineAsm::isInput)
8018         Flags |= InlineAsm::Extra_MayLoad;
8019       else if (OpInfo.Type == InlineAsm::isOutput)
8020         Flags |= InlineAsm::Extra_MayStore;
8021       else if (OpInfo.Type == InlineAsm::isClobber)
8022         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
8023     }
8024   }
8025 
8026   unsigned get() const { return Flags; }
8027 };
8028 
8029 } // end anonymous namespace
8030 
8031 /// visitInlineAsm - Handle a call to an InlineAsm object.
8032 void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
8033   const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
8034 
8035   // ConstraintOperands - Information about all of the constraints.
8036   SDISelAsmOperandInfoVector ConstraintOperands;
8037 
8038   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8039   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
8040       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
8041 
8042   // First pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
8043   // AsmDialect, MayLoad, MayStore).
8044   bool HasSideEffect = IA->hasSideEffects();
8045   ExtraFlags ExtraInfo(Call);
8046 
8047   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
8048   unsigned ResNo = 0;   // ResNo - The result number of the next output.
8049   unsigned NumMatchingOps = 0;
8050   for (auto &T : TargetConstraints) {
8051     ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
8052     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
8053 
8054     // Compute the value type for each operand.
8055     if (OpInfo.Type == InlineAsm::isInput ||
8056         (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
8057       OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++);
8058 
8059       // Process the call argument. BasicBlocks are labels, currently appearing
8060       // only in asm statements.
8061       if (isa<CallBrInst>(Call) &&
8062           ArgNo - 1 >= (cast<CallBrInst>(&Call)->getNumArgOperands() -
8063                         cast<CallBrInst>(&Call)->getNumIndirectDests() -
8064                         NumMatchingOps) &&
8065           (NumMatchingOps == 0 ||
8066            ArgNo - 1 < (cast<CallBrInst>(&Call)->getNumArgOperands() -
8067                         NumMatchingOps))) {
8068         const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
8069         EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
8070         OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
8071       } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
8072         OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
8073       } else {
8074         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
8075       }
8076 
8077       OpInfo.ConstraintVT =
8078           OpInfo
8079               .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
8080               .getSimpleVT();
8081     } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
8082       // The return value of the call is this value.  As such, there is no
8083       // corresponding argument.
8084       assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
8085       if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
8086         OpInfo.ConstraintVT = TLI.getSimpleValueType(
8087             DAG.getDataLayout(), STy->getElementType(ResNo));
8088       } else {
8089         assert(ResNo == 0 && "Asm only has one result!");
8090         OpInfo.ConstraintVT =
8091             TLI.getSimpleValueType(DAG.getDataLayout(), Call.getType());
8092       }
8093       ++ResNo;
8094     } else {
8095       OpInfo.ConstraintVT = MVT::Other;
8096     }
8097 
8098     if (OpInfo.hasMatchingInput())
8099       ++NumMatchingOps;
8100 
8101     if (!HasSideEffect)
8102       HasSideEffect = OpInfo.hasMemory(TLI);
8103 
8104     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
8105     // FIXME: Could we compute this on OpInfo rather than T?
8106 
8107     // Compute the constraint code and ConstraintType to use.
8108     TLI.ComputeConstraintToUse(T, SDValue());
8109 
8110     if (T.ConstraintType == TargetLowering::C_Immediate &&
8111         OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
8112       // We've delayed emitting a diagnostic like the "n" constraint because
8113       // inlining could cause an integer constant to show up here later.
8114       return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
8115                                           "' expects an integer constant "
8116                                           "expression");
8117 
8118     ExtraInfo.update(T);
8119   }
8120 
8122   // We won't need to flush pending loads if this asm doesn't touch
8123   // memory and is nonvolatile.
8124   SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
8125 
8126   bool IsCallBr = isa<CallBrInst>(Call);
8127   if (IsCallBr) {
8128     // If this is a callbr we need to flush pending exports since inlineasm_br
8129     // is a terminator. We need to do this before nodes are glued to
8130     // the inlineasm_br node.
8131     Chain = getControlRoot();
8132   }
8133 
8134   // Second pass over the constraints: compute which constraint option to use.
8135   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8136     // If this is an output operand with a matching input operand, look up the
8137     // matching input. If their types mismatch, e.g. one is an integer, the
8138     // other is floating point, or their sizes are different, flag it as an
8139     // error.
8140     if (OpInfo.hasMatchingInput()) {
8141       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
8142       patchMatchingInput(OpInfo, Input, DAG);
8143     }
8144 
8145     // Compute the constraint code and ConstraintType to use.
8146     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
8147 
8148     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8149         OpInfo.Type == InlineAsm::isClobber)
8150       continue;
8151 
8152     // If this is a memory input, and if the operand is not indirect, do what we
8153     // need to provide an address for the memory input.
8154     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8155         !OpInfo.isIndirect) {
8156       assert((OpInfo.isMultipleAlternative ||
8157               (OpInfo.Type == InlineAsm::isInput)) &&
8158              "Can only indirectify direct input operands!");
8159 
8160       // Memory operands really want the address of the value.
8161       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
8162 
8163       // There is no longer a Value* corresponding to this operand.
8164       OpInfo.CallOperandVal = nullptr;
8165 
8166       // It is now an indirect operand.
8167       OpInfo.isIndirect = true;
8168     }
8170   }
8171 
8172   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
8173   std::vector<SDValue> AsmNodeOperands;
8174   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
8175   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
8176       IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
8177 
8178   // If we have a !srcloc metadata node associated with it, we want to attach
8179   // this to the ultimately generated inline asm machineinstr.  To do this, we
8180   // pass in the third operand as this (potentially null) inline asm MDNode.
8181   const MDNode *SrcLoc = Call.getMetadata("srcloc");
8182   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
8183 
8184   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
8185   // bits as operand 3.
8186   AsmNodeOperands.push_back(DAG.getTargetConstant(
8187       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8188 
8189   // Third pass: Loop over operands to prepare DAG-level operands. As part of
8190   // this, assign virtual and physical registers for inputs and outputs.
8191   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8192     // Assign Registers.
8193     SDISelAsmOperandInfo &RefOpInfo =
8194         OpInfo.isMatchingInputConstraint()
8195             ? ConstraintOperands[OpInfo.getMatchedOperand()]
8196             : OpInfo;
8197     GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
8198 
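    // Diagnose an attempt to define a register that the target treats as
    // read-only for inline asm (TRI.isInlineAsmReadOnlyReg).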
8199     auto DetectWriteToReservedRegister = [&]() {
8200       const MachineFunction &MF = DAG.getMachineFunction();
8201       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8202       for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
8203         if (Register::isPhysicalRegister(Reg) &&
8204             TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
8205           const char *RegName = TRI.getName(Reg);
8206           emitInlineAsmError(Call, "write to reserved register '" +
8207                                        Twine(RegName) + "'");
8208           return true;
8209         }
8210       }
8211       return false;
8212     };
8213 
8214     switch (OpInfo.Type) {
8215     case InlineAsm::isOutput:
8216       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8217         unsigned ConstraintID =
8218             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8219         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8220                "Failed to convert memory constraint code to constraint id.");
8221 
8222         // Add information to the INLINEASM node to know about this output.
8223         unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8224         OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
8225         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
8226                                                         MVT::i32));
8227         AsmNodeOperands.push_back(OpInfo.CallOperand);
8228       } else {
8229         // Otherwise, this outputs to a register (directly for C_Register /
8230         // C_RegisterClass, and a target-defined fashion for
8231         // C_Immediate/C_Other). Find a register that we can use.
8232         if (OpInfo.AssignedRegs.Regs.empty()) {
8233           emitInlineAsmError(
8234               Call, "couldn't allocate output register for constraint '" +
8235                         Twine(OpInfo.ConstraintCode) + "'");
8236           return;
8237         }
8238 
8239         if (DetectWriteToReservedRegister())
8240           return;
8241 
8242         // Add information to the INLINEASM node to know that this register is
8243         // set.
8244         OpInfo.AssignedRegs.AddInlineAsmOperands(
8245             OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
8246                                   : InlineAsm::Kind_RegDef,
8247             false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
8248       }
8249       break;
8250 
8251     case InlineAsm::isInput: {
8252       SDValue InOperandVal = OpInfo.CallOperand;
8253 
8254       if (OpInfo.isMatchingInputConstraint()) {
8255         // If this is required to match an output register we have already set,
8256         // just use its register.
8257         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
8258                                                   AsmNodeOperands);
8259         unsigned OpFlag =
8260           cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
8261         if (InlineAsm::isRegDefKind(OpFlag) ||
8262             InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
8263           // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
8264           if (OpInfo.isIndirect) {
8265             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
8266             emitInlineAsmError(Call, "inline asm not supported yet: "
8267                                      "don't know how to handle tied "
8268                                      "indirect register inputs");
8269             return;
8270           }
8271 
8272           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
8273           SmallVector<unsigned, 4> Regs;
8274 
8275           if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
8276             unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
8277             MachineRegisterInfo &RegInfo =
8278                 DAG.getMachineFunction().getRegInfo();
8279             for (unsigned i = 0; i != NumRegs; ++i)
8280               Regs.push_back(RegInfo.createVirtualRegister(RC));
8281           } else {
8282             emitInlineAsmError(Call,
8283                                "inline asm error: This value type register "
8284                                "class is not natively supported!");
8285             return;
8286           }
8287 
8288           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
8289 
8290           SDLoc dl = getCurSDLoc();
8291           // Use the produced MatchedRegs object to copy the input value
8292           // into the new vregs and record them on the INLINEASM node.
8292           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag, &Call);
8293           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
8294                                            true, OpInfo.getMatchedOperand(), dl,
8295                                            DAG, AsmNodeOperands);
8296           break;
8297         }
8298 
8299         assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
8300         assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
8301                "Unexpected number of operands");
8302         // Add information to the INLINEASM node to know about this input.
8303         // See InlineAsm.h isUseOperandTiedToDef.
8304         OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
8305         OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
8306                                                     OpInfo.getMatchedOperand());
8307         AsmNodeOperands.push_back(DAG.getTargetConstant(
8308             OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8309         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
8310         break;
8311       }
8312 
8313       // Treat indirect 'X' constraint as memory.
8314       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
8315           OpInfo.isIndirect)
8316         OpInfo.ConstraintType = TargetLowering::C_Memory;
8317 
8318       if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
8319           OpInfo.ConstraintType == TargetLowering::C_Other) {
8320         std::vector<SDValue> Ops;
8321         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
8322                                           Ops, DAG);
8323         if (Ops.empty()) {
8324           if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
8325             if (isa<ConstantSDNode>(InOperandVal)) {
8326               emitInlineAsmError(Call, "value out of range for constraint '" +
8327                                            Twine(OpInfo.ConstraintCode) + "'");
8328               return;
8329             }
8330 
8331           emitInlineAsmError(Call,
8332                              "invalid operand for inline asm constraint '" +
8333                                  Twine(OpInfo.ConstraintCode) + "'");
8334           return;
8335         }
8336 
8337         // Add information to the INLINEASM node to know about this input.
8338         unsigned ResOpType =
8339           InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
8340         AsmNodeOperands.push_back(DAG.getTargetConstant(
8341             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8342         AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
8343         break;
8344       }
8345 
8346       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8347         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
8348         assert(InOperandVal.getValueType() ==
8349                    TLI.getPointerTy(DAG.getDataLayout()) &&
8350                "Memory operands expect pointer values");
8351 
8352         unsigned ConstraintID =
8353             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8354         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8355                "Failed to convert memory constraint code to constraint id.");
8356 
8357         // Add information to the INLINEASM node to know about this input.
8358         unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8359         ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
8360         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
8361                                                         getCurSDLoc(),
8362                                                         MVT::i32));
8363         AsmNodeOperands.push_back(InOperandVal);
8364         break;
8365       }
8366 
8367       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
8368               OpInfo.ConstraintType == TargetLowering::C_Register) &&
8369              "Unknown constraint type!");
8370 
8371       // TODO: Support this.
8372       if (OpInfo.isIndirect) {
8373         emitInlineAsmError(
8374             Call, "Don't know how to handle indirect register inputs yet "
8375                   "for constraint '" +
8376                       Twine(OpInfo.ConstraintCode) + "'");
8377         return;
8378       }
8379 
8380       // Copy the input into the appropriate registers.
8381       if (OpInfo.AssignedRegs.Regs.empty()) {
8382         emitInlineAsmError(Call,
8383                            "couldn't allocate input reg for constraint '" +
8384                                Twine(OpInfo.ConstraintCode) + "'");
8385         return;
8386       }
8387 
8388       if (DetectWriteToReservedRegister())
8389         return;
8390 
8391       SDLoc dl = getCurSDLoc();
8392 
8393       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
8394                                         &Call);
8395 
8396       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
8397                                                dl, DAG, AsmNodeOperands);
8398       break;
8399     }
8400     case InlineAsm::isClobber:
8401       // Add the clobbered value to the operand list, so that the register
8402       // allocator is aware that the physreg got clobbered.
8403       if (!OpInfo.AssignedRegs.Regs.empty())
8404         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
8405                                                  false, 0, getCurSDLoc(), DAG,
8406                                                  AsmNodeOperands);
8407       break;
8408     }
8409   }
8410 
8411   // Finish up input operands.  Set the input chain and add the flag last.
8412   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
8413   if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
8414 
8415   unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
8416   Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
8417                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
8418   Flag = Chain.getValue(1);
8419 
8420   // Do additional work to generate outputs.
8421 
8422   SmallVector<EVT, 1> ResultVTs;
8423   SmallVector<SDValue, 1> ResultValues;
8424   SmallVector<SDValue, 8> OutChains;
8425 
8426   llvm::Type *CallResultType = Call.getType();
8427   ArrayRef<Type *> ResultTypes;
8428   if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
8429     ResultTypes = StructResult->elements();
8430   else if (!CallResultType->isVoidTy())
8431     ResultTypes = makeArrayRef(CallResultType);
8432 
8433   auto CurResultType = ResultTypes.begin();
8434   auto handleRegAssign = [&](SDValue V) {
8435     assert(CurResultType != ResultTypes.end() && "Unexpected value");
8436     assert((*CurResultType)->isSized() && "Unexpected unsized type");
8437     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
8438     ++CurResultType;
8439     // If the type of the inline asm call site return value differs from but
8440     // has the same size as the asm output type, bitcast it.  One example of this
8441     // is for vectors with different width / number of elements.  This can
8442     // happen for register classes that can contain multiple different value
8443     // types.  The preg or vreg allocated may not have the same VT as was
8444     // expected.
8445     //
8446     // This can also happen for a return value that disagrees with the register
8447     // class it is put in, eg. a double in a general-purpose register on a
8448     // 32-bit machine.
8449     if (ResultVT != V.getValueType() &&
8450         ResultVT.getSizeInBits() == V.getValueSizeInBits())
8451       V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
8452     else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
8453              V.getValueType().isInteger()) {
8454       // If a result value was tied to an input value, the computed result
8455       // may have a wider width than the expected result.  Extract the
8456       // relevant portion.
8457       V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
8458     }
8459     assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
8460     ResultVTs.push_back(ResultVT);
8461     ResultValues.push_back(V);
8462   };
8463 
8464   // Deal with output operands.
8465   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8466     if (OpInfo.Type == InlineAsm::isOutput) {
8467       SDValue Val;
8468       // Skip trivial output operands.
8469       if (OpInfo.AssignedRegs.Regs.empty())
8470         continue;
8471 
8472       switch (OpInfo.ConstraintType) {
8473       case TargetLowering::C_Register:
8474       case TargetLowering::C_RegisterClass:
8475         Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
8476                                                   Chain, &Flag, &Call);
8477         break;
8478       case TargetLowering::C_Immediate:
8479       case TargetLowering::C_Other:
8480         Val = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
8481                                               OpInfo, DAG);
8482         break;
8483       case TargetLowering::C_Memory:
8484         break; // Already handled.
8485       case TargetLowering::C_Unknown:
8486         assert(false && "Unexpected unknown constraint");
8487       }
8488 
8489       // Indirect outputs manifest as stores. Record the output chains.
8490       if (OpInfo.isIndirect) {
8491         const Value *Ptr = OpInfo.CallOperandVal;
8492         assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
8493         SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
8494                                      MachinePointerInfo(Ptr));
8495         OutChains.push_back(Store);
8496       } else {
8497         // Generate CopyFromRegs to associated registers.
8498         assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
8499         if (Val.getOpcode() == ISD::MERGE_VALUES) {
8500           for (const SDValue &V : Val->op_values())
8501             handleRegAssign(V);
8502         } else
8503           handleRegAssign(Val);
8504       }
8505     }
8506   }
8507 
8508   // Set results.
8509   if (!ResultValues.empty()) {
8510     assert(CurResultType == ResultTypes.end() &&
8511            "Mismatch in number of ResultTypes");
8512     assert(ResultValues.size() == ResultTypes.size() &&
8513            "Mismatch in number of output operands in asm result");
8514 
8515     SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
8516                             DAG.getVTList(ResultVTs), ResultValues);
8517     setValue(&Call, V);
8518   }
8519 
8520   // Collect store chains.
8521   if (!OutChains.empty())
8522     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
8523 
8524   // Only update the root if the inline assembly has a memory effect.
8525   if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr)
8526     DAG.setRoot(Chain);
8527 }
8528 
8529 void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
8530                                              const Twine &Message) {
8531   LLVMContext &Ctx = *DAG.getContext();
8532   Ctx.emitError(&Call, Message);
8533 
8534   // Make sure we leave the DAG in a valid state
8535   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8536   SmallVector<EVT, 1> ValueVTs;
8537   ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
8538 
8539   if (ValueVTs.empty())
8540     return;
8541 
8542   SmallVector<SDValue, 1> Ops;
8543   for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
8544     Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
8545 
8546   setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
8547 }
8548 
8549 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
8550   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
8551                           MVT::Other, getRoot(),
8552                           getValue(I.getArgOperand(0)),
8553                           DAG.getSrcValue(I.getArgOperand(0))));
8554 }
8555 
8556 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
8557   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8558   const DataLayout &DL = DAG.getDataLayout();
8559   SDValue V = DAG.getVAArg(
8560       TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
8561       getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
8562       DL.getABITypeAlignment(I.getType()));
8563   DAG.setRoot(V.getValue(1));
8564 
8565   if (I.getType()->isPointerTy())
8566     V = DAG.getPtrExtOrTrunc(
8567         V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
8568   setValue(&I, V);
8569 }
8570 
8571 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
8572   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
8573                           MVT::Other, getRoot(),
8574                           getValue(I.getArgOperand(0)),
8575                           DAG.getSrcValue(I.getArgOperand(0))));
8576 }
8577 
8578 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
8579   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
8580                           MVT::Other, getRoot(),
8581                           getValue(I.getArgOperand(0)),
8582                           getValue(I.getArgOperand(1)),
8583                           DAG.getSrcValue(I.getArgOperand(0)),
8584                           DAG.getSrcValue(I.getArgOperand(1))));
8585 }
8586 
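/// If \p I carries !range metadata for an unsigned range that starts at zero,
/// wrap \p Op in an AssertZext of the narrowest integer VT covering the
/// range's maximum, so later nodes know the high bits are zero. For example,
/// metadata !{i64 0, i64 256} yields an AssertZext annotated with i8.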
8587 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
8588                                                     const Instruction &I,
8589                                                     SDValue Op) {
8590   const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
8591   if (!Range)
8592     return Op;
8593 
8594   ConstantRange CR = getConstantRangeFromMetadata(*Range);
8595   if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
8596     return Op;
8597 
8598   APInt Lo = CR.getUnsignedMin();
8599   if (!Lo.isMinValue())
8600     return Op;
8601 
8602   APInt Hi = CR.getUnsignedMax();
8603   unsigned Bits = std::max(Hi.getActiveBits(),
8604                            static_cast<unsigned>(IntegerType::MIN_INT_BITS));
8605 
8606   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
8607 
8608   SDLoc SL = getCurSDLoc();
8609 
8610   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
8611                              DAG.getValueType(SmallVT));
8612   unsigned NumVals = Op.getNode()->getNumValues();
8613   if (NumVals == 1)
8614     return ZExt;
8615 
8616   SmallVector<SDValue, 4> Ops;
8617 
8618   Ops.push_back(ZExt);
8619   for (unsigned I = 1; I != NumVals; ++I)
8620     Ops.push_back(Op.getValue(I));
8621 
8622   return DAG.getMergeValues(Ops, SL);
8623 }
8624 
8625 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
8626 /// the call being lowered.
8627 ///
8628 /// This is a helper for lowering intrinsics that follow a target calling
8629 /// convention or require stack pointer adjustment. Only a subset of the
8630 /// intrinsic's operands need to participate in the calling convention.
8631 void SelectionDAGBuilder::populateCallLoweringInfo(
8632     TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
8633     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
8634     bool IsPatchPoint) {
8635   TargetLowering::ArgListTy Args;
8636   Args.reserve(NumArgs);
8637 
8638   // Populate the argument list.
8639   // Attributes for args start at offset 1, after the return attribute.
8640   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
8641        ArgI != ArgE; ++ArgI) {
8642     const Value *V = Call->getOperand(ArgI);
8643 
8644     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
8645 
8646     TargetLowering::ArgListEntry Entry;
8647     Entry.Node = getValue(V);
8648     Entry.Ty = V->getType();
8649     Entry.setAttributes(Call, ArgI);
8650     Args.push_back(Entry);
8651   }
8652 
8653   CLI.setDebugLoc(getCurSDLoc())
8654       .setChain(getRoot())
8655       .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args))
8656       .setDiscardResult(Call->use_empty())
8657       .setIsPatchPoint(IsPatchPoint)
8658       .setIsPreallocated(
8659           Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
8660 }
8661 
8662 /// Add a stack map intrinsic call's live variable operands to a stackmap
8663 /// or patchpoint target node's operand list.
8664 ///
8665 /// Constants are converted to TargetConstants purely as an optimization to
8666 /// avoid constant materialization and register allocation.
8667 ///
8668 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
8669 /// generate address computation nodes, and so FinalizeISel can convert the
8670 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
8671 /// address materialization and register allocation, but may also be required
8672 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
8673 /// alloca in the entry block, then the runtime may assume that the alloca's
8674 /// StackMap location can be read immediately after compilation and that the
8675 /// location is valid at any point during execution (this is similar to the
8676 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
8677 /// only available in a register, then the runtime would need to trap when
8678 /// execution reaches the StackMap in order to read the alloca's location.
8679 static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
8680                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
8681                                 SelectionDAGBuilder &Builder) {
8682   for (unsigned i = StartIdx, e = Call.arg_size(); i != e; ++i) {
8683     SDValue OpVal = Builder.getValue(Call.getArgOperand(i));
8684     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
8685       Ops.push_back(
8686         Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
8687       Ops.push_back(
8688         Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
8689     } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
8690       const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
8691       Ops.push_back(Builder.DAG.getTargetFrameIndex(
8692           FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
8693     } else
8694       Ops.push_back(OpVal);
8695   }
8696 }
8697 
8698 /// Lower llvm.experimental.stackmap directly to its target opcode.
8699 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
8700   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
8701   //                                  [live variables...])
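  //
  // For example,
  //   call void @llvm.experimental.stackmap(i64 42, i32 8, i64 %val)
  // records the location of %val under ID 42 and guarantees an 8-byte shadow
  // of instructions (padded with NOPs as needed) at the site.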
8702 
8703   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
8704 
8705   SDValue Chain, InFlag, Callee, NullPtr;
8706   SmallVector<SDValue, 32> Ops;
8707 
8708   SDLoc DL = getCurSDLoc();
8709   Callee = getValue(CI.getCalledOperand());
8710   NullPtr = DAG.getIntPtrConstant(0, DL, true);
8711 
8712   // The stackmap intrinsic only records the live variables (the arguments
8713   // passed to it) and emits NOPS (if requested). Unlike the patchpoint
8714   // intrinsic, this won't be lowered to a function call. This means we don't
8715   // have to worry about calling conventions and target specific lowering code.
8716   // Instead we perform the call lowering right here.
8717   //
8718   // chain, flag = CALLSEQ_START(chain, 0, 0)
8719   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
8720   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
8721   //
8722   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
8723   InFlag = Chain.getValue(1);
8724 
8725   // Add the <id> and <numBytes> constants.
8726   SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
8727   Ops.push_back(DAG.getTargetConstant(
8728                   cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
8729   SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
8730   Ops.push_back(DAG.getTargetConstant(
8731                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
8732                   MVT::i32));
8733 
8734   // Push live variables for the stack map.
8735   addStackMapLiveVars(CI, 2, DL, Ops, *this);
8736 
8737   // We are not pushing any register mask info here on the operands list,
8738   // because the stackmap doesn't clobber anything.
8739 
8740   // Push the chain and the glue flag.
8741   Ops.push_back(Chain);
8742   Ops.push_back(InFlag);
8743 
8744   // Create the STACKMAP node.
8745   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8746   SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
8747   Chain = SDValue(SM, 0);
8748   InFlag = Chain.getValue(1);
8749 
8750   Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
8751 
8752   // Stackmaps don't generate values, so nothing goes into the NodeMap.
8753 
8754   // Set the root to the target-lowered call chain.
8755   DAG.setRoot(Chain);
8756 
8757   // Inform the Frame Information that we have a stackmap in this function.
8758   FuncInfo.MF->getFrameInfo().setHasStackMap();
8759 }
8760 
8761 /// Lower llvm.experimental.patchpoint directly to its target opcode.
8762 void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
8763                                           const BasicBlock *EHPadBB) {
8764   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
8765   //                                                 i32 <numBytes>,
8766   //                                                 i8* <target>,
8767   //                                                 i32 <numArgs>,
8768   //                                                 [Args...],
8769   //                                                 [live variables...])
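  //
  // For example,
  //   %r = call i64 @llvm.experimental.patchpoint.i64(i64 7, i32 15,
  //                     i8* %target, i32 2, i64 %a, i64 %b)
  // emits a 15-byte patchable region that initially calls %target(%a, %b)
  // under ID 7.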
8770 
8771   CallingConv::ID CC = CB.getCallingConv();
8772   bool IsAnyRegCC = CC == CallingConv::AnyReg;
8773   bool HasDef = !CB.getType()->isVoidTy();
8774   SDLoc dl = getCurSDLoc();
8775   SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
8776 
8777   // Handle immediate and symbolic callees.
8778   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
8779     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
8780                                    /*isTarget=*/true);
8781   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
8782     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
8783                                          SDLoc(SymbolicCallee),
8784                                          SymbolicCallee->getValueType(0));
8785 
8786   // Get the real number of arguments participating in the call <numArgs>
8787   SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
8788   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
8789 
8790   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
8791   // Intrinsics include all meta-operands up to but not including CC.
8792   unsigned NumMetaOpers = PatchPointOpers::CCPos;
8793   assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
8794          "Not enough arguments provided to the patchpoint intrinsic");
8795 
8796   // For AnyRegCC the arguments are lowered later on manually.
8797   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
8798   Type *ReturnTy =
8799       IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
8800 
8801   TargetLowering::CallLoweringInfo CLI(DAG);
8802   populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
8803                            ReturnTy, true);
8804   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8805 
8806   SDNode *CallEnd = Result.second.getNode();
8807   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
8808     CallEnd = CallEnd->getOperand(0).getNode();
8809 
8810   // Get a call instruction from the call sequence chain.
8811   // Tail calls are not allowed.
8812   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
8813          "Expected a callseq node.");
8814   SDNode *Call = CallEnd->getOperand(0).getNode();
8815   bool HasGlue = Call->getGluedNode();
8816 
8817   // Replace the target specific call node with the patchable intrinsic.
8818   SmallVector<SDValue, 8> Ops;
8819 
8820   // Add the <id> and <numBytes> constants.
8821   SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
8822   Ops.push_back(DAG.getTargetConstant(
8823                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
8824   SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
8825   Ops.push_back(DAG.getTargetConstant(
8826                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
8827                   MVT::i32));
8828 
8829   // Add the callee.
8830   Ops.push_back(Callee);
8831 
8832   // Adjust <numArgs> to account for any arguments that have been passed on the
8833   // stack instead.
8834   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
8835   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
8836   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
8837   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
8838 
8839   // Add the calling convention
8840   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
8841 
8842   // Add the arguments we omitted previously. The register allocator should
8843   // place these in any free register.
8844   if (IsAnyRegCC)
8845     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
8846       Ops.push_back(getValue(CB.getArgOperand(i)));
8847 
8848   // Push the arguments from the call instruction up to the register mask.
8849   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
8850   Ops.append(Call->op_begin() + 2, e);
8851 
8852   // Push live variables for the stack map.
8853   addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
8854 
8855   // Push the register mask info.
8856   if (HasGlue)
8857     Ops.push_back(*(Call->op_end()-2));
8858   else
8859     Ops.push_back(*(Call->op_end()-1));
8860 
8861   // Push the chain (this is originally the first operand of the call, but
8862   // now becomes the last or second-to-last operand).
8863   Ops.push_back(*(Call->op_begin()));
8864 
8865   // Push the glue flag (last operand).
8866   if (HasGlue)
8867     Ops.push_back(*(Call->op_end()-1));
8868 
8869   SDVTList NodeTys;
8870   if (IsAnyRegCC && HasDef) {
8871     // Create the return types based on the intrinsic definition
8872     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8873     SmallVector<EVT, 3> ValueVTs;
8874     ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
8875     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
8876 
8877     // There is always a chain and a glue type at the end
8878     ValueVTs.push_back(MVT::Other);
8879     ValueVTs.push_back(MVT::Glue);
8880     NodeTys = DAG.getVTList(ValueVTs);
8881   } else
8882     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8883 
8884   // Replace the target specific call node with a PATCHPOINT node.
8885   MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
8886                                          dl, NodeTys, Ops);
8887 
8888   // Update the NodeMap.
8889   if (HasDef) {
8890     if (IsAnyRegCC)
8891       setValue(&CB, SDValue(MN, 0));
8892     else
8893       setValue(&CB, Result.first);
8894   }
8895 
8896   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
8897   // call sequence. Furthermore the location of the chain and glue can change
8898   // when the AnyReg calling convention is used and the intrinsic returns a
8899   // value.
8900   if (IsAnyRegCC && HasDef) {
8901     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
8902     SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
8903     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
8904   } else
8905     DAG.ReplaceAllUsesWith(Call, MN);
8906   DAG.DeleteNode(Call);
8907 
8908   // Inform the Frame Information that we have a patchpoint in this function.
8909   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
8910 }
8911 
8912 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
8913                                             unsigned Intrinsic) {
8914   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8915   SDValue Op1 = getValue(I.getArgOperand(0));
8916   SDValue Op2;
8917   if (I.getNumArgOperands() > 1)
8918     Op2 = getValue(I.getArgOperand(1));
8919   SDLoc dl = getCurSDLoc();
8920   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8921   SDValue Res;
8922   FastMathFlags FMF;
8923   if (isa<FPMathOperator>(I))
8924     FMF = I.getFastMathFlags();
8925 
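  // For the two-operand fadd/fmul reductions, Op1 is the scalar start value
  // and Op2 is the vector to reduce. The 'reassoc' flag permits reducing the
  // vector in any order and folding in the start value afterwards; otherwise
  // an ordered (strict) reduction node must be used.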
8926   switch (Intrinsic) {
8927   case Intrinsic::experimental_vector_reduce_v2_fadd:
8928     if (FMF.allowReassoc())
8929       Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
8930                         DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2));
8931     else
8932       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
8933     break;
8934   case Intrinsic::experimental_vector_reduce_v2_fmul:
8935     if (FMF.allowReassoc())
8936       Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
8937                         DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2));
8938     else
8939       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
8940     break;
8941   case Intrinsic::experimental_vector_reduce_add:
8942     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
8943     break;
8944   case Intrinsic::experimental_vector_reduce_mul:
8945     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
8946     break;
8947   case Intrinsic::experimental_vector_reduce_and:
8948     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
8949     break;
8950   case Intrinsic::experimental_vector_reduce_or:
8951     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
8952     break;
8953   case Intrinsic::experimental_vector_reduce_xor:
8954     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
8955     break;
8956   case Intrinsic::experimental_vector_reduce_smax:
8957     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
8958     break;
8959   case Intrinsic::experimental_vector_reduce_smin:
8960     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
8961     break;
8962   case Intrinsic::experimental_vector_reduce_umax:
8963     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
8964     break;
8965   case Intrinsic::experimental_vector_reduce_umin:
8966     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
8967     break;
8968   case Intrinsic::experimental_vector_reduce_fmax:
8969     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1);
8970     break;
8971   case Intrinsic::experimental_vector_reduce_fmin:
8972     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1);
8973     break;
8974   default:
8975     llvm_unreachable("Unhandled vector reduce intrinsic");
8976   }
8977   setValue(&I, Res);
8978 }
8979 
8980 /// Returns an AttributeList representing the attributes applied to the return
8981 /// value of the given call.
8982 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
8983   SmallVector<Attribute::AttrKind, 2> Attrs;
8984   if (CLI.RetSExt)
8985     Attrs.push_back(Attribute::SExt);
8986   if (CLI.RetZExt)
8987     Attrs.push_back(Attribute::ZExt);
8988   if (CLI.IsInReg)
8989     Attrs.push_back(Attribute::InReg);
8990 
8991   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
8992                             Attrs);
8993 }
8994 
8995 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
8996 /// implementation, which just calls LowerCall.
8997 /// FIXME: When all targets are
8998 /// migrated to using LowerCall, this hook should be integrated into SDISel.
8999 std::pair<SDValue, SDValue>
9000 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
9001   // Handle the incoming return values from the call.
9002   CLI.Ins.clear();
9003   Type *OrigRetTy = CLI.RetTy;
9004   SmallVector<EVT, 4> RetTys;
9005   SmallVector<uint64_t, 4> Offsets;
9006   auto &DL = CLI.DAG.getDataLayout();
9007   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
9008 
9009   if (CLI.IsPostTypeLegalization) {
9010     // If we are lowering a libcall after legalization, split the return type.
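    // Type legalization has already run, so expand each return EVT here into
    // the registers the target would use for it, with matching byte offsets.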
9011     SmallVector<EVT, 4> OldRetTys;
9012     SmallVector<uint64_t, 4> OldOffsets;
9013     RetTys.swap(OldRetTys);
9014     Offsets.swap(OldOffsets);
9015 
9016     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
9017       EVT RetVT = OldRetTys[i];
9018       uint64_t Offset = OldOffsets[i];
9019       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
9020       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
9021       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
9022       RetTys.append(NumRegs, RegisterVT);
9023       for (unsigned j = 0; j != NumRegs; ++j)
9024         Offsets.push_back(Offset + j * RegisterVTByteSZ);
9025     }
9026   }
9027 
9028   SmallVector<ISD::OutputArg, 4> Outs;
9029   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
9030 
9031   bool CanLowerReturn =
9032       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
9033                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
9034 
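  // If the return value cannot be lowered in registers, demote it: allocate a
  // stack slot, pass its address as a hidden sret argument, and load the
  // result back out after the call. DemoteStackIdx holds a sentinel until the
  // slot is created.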
9035   SDValue DemoteStackSlot;
9036   int DemoteStackIdx = -100;
9037   if (!CanLowerReturn) {
9038     // FIXME: equivalent assert?
9039     // assert(!CS.hasInAllocaArgument() &&
9040     //        "sret demotion is incompatible with inalloca");
9041     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
9042     Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
9043     MachineFunction &MF = CLI.DAG.getMachineFunction();
9044     DemoteStackIdx =
9045         MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
9046     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
9047                                               DL.getAllocaAddrSpace());
9048 
9049     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
9050     ArgListEntry Entry;
9051     Entry.Node = DemoteStackSlot;
9052     Entry.Ty = StackSlotPtrType;
9053     Entry.IsSExt = false;
9054     Entry.IsZExt = false;
9055     Entry.IsInReg = false;
9056     Entry.IsSRet = true;
9057     Entry.IsNest = false;
9058     Entry.IsByVal = false;
9059     Entry.IsReturned = false;
9060     Entry.IsSwiftSelf = false;
9061     Entry.IsSwiftError = false;
9062     Entry.IsCFGuardTarget = false;
9063     Entry.Alignment = Alignment;
9064     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
9065     CLI.NumFixedArgs += 1;
9066     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
9067 
9068     // sret demotion isn't compatible with tail-calls, since the sret argument
9069     // points into the caller's stack frame.
9070     CLI.IsTailCall = false;
9071   } else {
9072     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
9073         CLI.RetTy, CLI.CallConv, CLI.IsVarArg);
9074     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
9075       ISD::ArgFlagsTy Flags;
9076       if (NeedsRegBlock) {
9077         Flags.setInConsecutiveRegs();
9078         if (I == RetTys.size() - 1)
9079           Flags.setInConsecutiveRegsLast();
9080       }
9081       EVT VT = RetTys[I];
9082       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9083                                                      CLI.CallConv, VT);
9084       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9085                                                        CLI.CallConv, VT);
9086       for (unsigned i = 0; i != NumRegs; ++i) {
9087         ISD::InputArg MyFlags;
9088         MyFlags.Flags = Flags;
9089         MyFlags.VT = RegisterVT;
9090         MyFlags.ArgVT = VT;
9091         MyFlags.Used = CLI.IsReturnValueUsed;
9092         if (CLI.RetTy->isPointerTy()) {
9093           MyFlags.Flags.setPointer();
9094           MyFlags.Flags.setPointerAddrSpace(
9095               cast<PointerType>(CLI.RetTy)->getAddressSpace());
9096         }
9097         if (CLI.RetSExt)
9098           MyFlags.Flags.setSExt();
9099         if (CLI.RetZExt)
9100           MyFlags.Flags.setZExt();
9101         if (CLI.IsInReg)
9102           MyFlags.Flags.setInReg();
9103         CLI.Ins.push_back(MyFlags);
9104       }
9105     }
9106   }
9107 
9108   // We push in swifterror return as the last element of CLI.Ins.
9109   ArgListTy &Args = CLI.getArgs();
9110   if (supportSwiftError()) {
9111     for (unsigned i = 0, e = Args.size(); i != e; ++i) {
9112       if (Args[i].IsSwiftError) {
9113         ISD::InputArg MyFlags;
9114         MyFlags.VT = getPointerTy(DL);
9115         MyFlags.ArgVT = EVT(getPointerTy(DL));
9116         MyFlags.Flags.setSwiftError();
9117         CLI.Ins.push_back(MyFlags);
9118       }
9119     }
9120   }
9121 
9122   // Handle all of the outgoing arguments.
9123   CLI.Outs.clear();
9124   CLI.OutVals.clear();
9125   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
9126     SmallVector<EVT, 4> ValueVTs;
9127     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
9128     // FIXME: Split arguments if CLI.IsPostTypeLegalization
9129     Type *FinalType = Args[i].Ty;
9130     if (Args[i].IsByVal)
9131       FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
9132     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
9133         FinalType, CLI.CallConv, CLI.IsVarArg);
9134     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
9135          ++Value) {
9136       EVT VT = ValueVTs[Value];
9137       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
9138       SDValue Op = SDValue(Args[i].Node.getNode(),
9139                            Args[i].Node.getResNo() + Value);
9140       ISD::ArgFlagsTy Flags;
9141 
9142       // Certain targets (such as MIPS) may have a different ABI alignment
9143       // for a type depending on the context. Give the target a chance to
9144       // specify the alignment it wants.
9145       const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
9146 
9147       if (Args[i].Ty->isPointerTy()) {
9148         Flags.setPointer();
9149         Flags.setPointerAddrSpace(
9150             cast<PointerType>(Args[i].Ty)->getAddressSpace());
9151       }
9152       if (Args[i].IsZExt)
9153         Flags.setZExt();
9154       if (Args[i].IsSExt)
9155         Flags.setSExt();
9156       if (Args[i].IsInReg) {
9157         // If we are using the vectorcall calling convention, a structure
9158         // that is passed InReg is surely an HVA (Homogeneous Vector Aggregate).
9159         if (CLI.CallConv == CallingConv::X86_VectorCall &&
9160             isa<StructType>(FinalType)) {
9161           // The first value of a structure is marked as the start of the HVA.
9162           if (0 == Value)
9163             Flags.setHvaStart();
9164           Flags.setHva();
9165         }
9166         // Set InReg Flag
9167         Flags.setInReg();
9168       }
9169       if (Args[i].IsSRet)
9170         Flags.setSRet();
9171       if (Args[i].IsSwiftSelf)
9172         Flags.setSwiftSelf();
9173       if (Args[i].IsSwiftError)
9174         Flags.setSwiftError();
9175       if (Args[i].IsCFGuardTarget)
9176         Flags.setCFGuardTarget();
9177       if (Args[i].IsByVal)
9178         Flags.setByVal();
9179       if (Args[i].IsPreallocated) {
9180         Flags.setPreallocated();
9181         // Set the byval flag for CCAssignFn callbacks that don't know about
9182         // preallocated.  This way we can know how many bytes we should've
9183         // allocated and how many bytes a callee cleanup function will pop.  If
9184         // we port preallocated to more targets, we'll have to add custom
9185         // preallocated handling in the various CC lowering callbacks.
9186         Flags.setByVal();
9187       }
9188       if (Args[i].IsInAlloca) {
9189         Flags.setInAlloca();
9190         // Set the byval flag for CCAssignFn callbacks that don't know about
9191         // inalloca.  This way we can know how many bytes we should've allocated
9192         // and how many bytes a callee cleanup function will pop.  If we port
9193         // inalloca to more targets, we'll have to add custom inalloca handling
9194         // in the various CC lowering callbacks.
9195         Flags.setByVal();
9196       }
9197       if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
9198         PointerType *Ty = cast<PointerType>(Args[i].Ty);
9199         Type *ElementTy = Ty->getElementType();
9200 
9201         unsigned FrameSize = DL.getTypeAllocSize(
9202             Args[i].ByValType ? Args[i].ByValType : ElementTy);
9203         Flags.setByValSize(FrameSize);
9204 
9205         // For ByVal, size and alignment should be passed from FE; BE will guess if this info is not there, but there are cases it cannot get right.
9206         Align FrameAlign;
9207         if (auto MA = Args[i].Alignment)
9208           FrameAlign = *MA;
9209         else
9210           FrameAlign = Align(getByValTypeAlignment(ElementTy, DL));
9211         Flags.setByValAlign(FrameAlign);
9212       }
9213       if (Args[i].IsNest)
9214         Flags.setNest();
9215       if (NeedsRegBlock)
9216         Flags.setInConsecutiveRegs();
9217       Flags.setOrigAlign(OriginalAlignment);
9218 
9219       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9220                                                  CLI.CallConv, VT);
9221       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9222                                                         CLI.CallConv, VT);
9223       SmallVector<SDValue, 4> Parts(NumParts);
9224       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
9225 
9226       if (Args[i].IsSExt)
9227         ExtendKind = ISD::SIGN_EXTEND;
9228       else if (Args[i].IsZExt)
9229         ExtendKind = ISD::ZERO_EXTEND;
9230 
9231       // Conservatively only handle 'returned' on non-vectors that can be lowered,
9232       // for now.
9233       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
9234           CanLowerReturn) {
9235         assert((CLI.RetTy == Args[i].Ty ||
9236                 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
9237                  CLI.RetTy->getPointerAddressSpace() ==
9238                      Args[i].Ty->getPointerAddressSpace())) &&
9239                RetTys.size() == NumValues && "unexpected use of 'returned'");
9240         // Before passing 'returned' to the target lowering code, ensure that
9241         // either the register MVT and the actual EVT are the same size or that
9242         // the return value and argument are extended in the same way; in these
9243         // cases it's safe to pass the argument register value unchanged as the
9244         // return register value (although it's at the target's option whether
9245         // to do so)
9246         // TODO: allow code generation to take advantage of partially preserved
9247         // registers rather than clobbering the entire register when the
9248         // parameter extension method is not compatible with the return
9249         // extension method
9250         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
9251             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
9252              CLI.RetZExt == Args[i].IsZExt))
9253           Flags.setReturned();
9254       }
9255 
9256       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
9257                      CLI.CallConv, ExtendKind);
9258 
9259       for (unsigned j = 0; j != NumParts; ++j) {
9260         // If it isn't the first piece, the alignment must be 1.
9261         // For scalable vectors the scalable part is currently handled
9262         // by individual targets, so we just use the known minimum size here.
9263         ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
9264                     i < CLI.NumFixedArgs, i,
9265                     j*Parts[j].getValueType().getStoreSize().getKnownMinSize());
9266         if (NumParts > 1 && j == 0)
9267           MyFlags.Flags.setSplit();
9268         else if (j != 0) {
9269           MyFlags.Flags.setOrigAlign(Align(1));
9270           if (j == NumParts - 1)
9271             MyFlags.Flags.setSplitEnd();
9272         }
9273 
9274         CLI.Outs.push_back(MyFlags);
9275         CLI.OutVals.push_back(Parts[j]);
9276       }
9277 
9278       if (NeedsRegBlock && Value == NumValues - 1)
9279         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
9280     }
9281   }
9282 
9283   SmallVector<SDValue, 4> InVals;
9284   CLI.Chain = LowerCall(CLI, InVals);
9285 
9286   // Update CLI.InVals to use outside of this function.
9287   CLI.InVals = InVals;
9288 
9289   // Verify that the target's LowerCall behaved as expected.
9290   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
9291          "LowerCall didn't return a valid chain!");
9292   assert((!CLI.IsTailCall || InVals.empty()) &&
9293          "LowerCall emitted a return value for a tail call!");
9294   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
9295          "LowerCall didn't emit the correct number of values!");
9296 
9297   // For a tail call, the return value is merely live-out and there aren't
9298   // any nodes in the DAG representing it. Return a special value to
9299   // indicate that a tail call has been emitted and no more Instructions
9300   // should be processed in the current block.
9301   if (CLI.IsTailCall) {
9302     CLI.DAG.setRoot(CLI.Chain);
9303     return std::make_pair(SDValue(), SDValue());
9304   }
9305 
9306 #ifndef NDEBUG
9307   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
9308     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
9309     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
9310            "LowerCall emitted a value with the wrong type!");
9311   }
9312 #endif
9313 
9314   SmallVector<SDValue, 4> ReturnValues;
9315   if (!CanLowerReturn) {
9316     // The instruction result is the result of loading from the
9317     // hidden sret parameter.
9318     SmallVector<EVT, 1> PVTs;
9319     Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());
9320 
9321     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
9322     assert(PVTs.size() == 1 && "Pointers should fit in one register");
9323     EVT PtrVT = PVTs[0];
9324 
9325     unsigned NumValues = RetTys.size();
9326     ReturnValues.resize(NumValues);
9327     SmallVector<SDValue, 4> Chains(NumValues);
9328 
9329     // An aggregate return value cannot wrap around the address space, so
9330     // offsets to its parts don't wrap either.
9331     SDNodeFlags Flags;
9332     Flags.setNoUnsignedWrap(true);
9333 
9334     MachineFunction &MF = CLI.DAG.getMachineFunction();
9335     Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
9336     for (unsigned i = 0; i < NumValues; ++i) {
9337       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
9338                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
9339                                                         PtrVT), Flags);
9340       SDValue L = CLI.DAG.getLoad(
9341           RetTys[i], CLI.DL, CLI.Chain, Add,
9342           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
9343                                             DemoteStackIdx, Offsets[i]),
9344           HiddenSRetAlign);
9345       ReturnValues[i] = L;
9346       Chains[i] = L.getValue(1);
9347     }
9348 
9349     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
9350   } else {
9351     // Collect the legal value parts into potentially illegal values
9352     // that correspond to the original function's return values.
9353     Optional<ISD::NodeType> AssertOp;
9354     if (CLI.RetSExt)
9355       AssertOp = ISD::AssertSext;
9356     else if (CLI.RetZExt)
9357       AssertOp = ISD::AssertZext;
9358     unsigned CurReg = 0;
9359     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
9360       EVT VT = RetTys[I];
9361       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9362                                                      CLI.CallConv, VT);
9363       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9364                                                        CLI.CallConv, VT);
9365 
9366       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
9367                                               NumRegs, RegisterVT, VT, nullptr,
9368                                               CLI.CallConv, AssertOp));
9369       CurReg += NumRegs;
9370     }
9371 
9372     // For a function returning void, there is no return value. We can't
9373     // create such a node, so we just return a null return value; nothing
9374     // will actually look at the value.
9375     if (ReturnValues.empty())
9376       return std::make_pair(SDValue(), CLI.Chain);
9377   }
9378 
9379   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
9380                                 CLI.DAG.getVTList(RetTys), ReturnValues);
9381   return std::make_pair(Res, CLI.Chain);
9382 }
9383 
9384 void TargetLowering::LowerOperationWrapper(SDNode *N,
9385                                            SmallVectorImpl<SDValue> &Results,
9386                                            SelectionDAG &DAG) const {
9387   if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
9388     Results.push_back(Res);
9389 }
9390 
9391 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9392   llvm_unreachable("LowerOperation not implemented for this target!");
9393 }
9394 
9395 void
9396 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
9397   SDValue Op = getNonRegisterValue(V);
9398   assert((Op.getOpcode() != ISD::CopyFromReg ||
9399           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
9400          "Copy from a reg to the same reg!");
9401   assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
9402 
9403   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9404   // If this is an InlineAsm we have to match the registers required, not the
9405   // notional registers required by the type.
9406 
9407   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
9408                    None); // This is not an ABI copy.
9409   SDValue Chain = DAG.getEntryNode();
9410 
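  // Use the extension kind recorded for V during lowering, if any, so the
  // copy into the virtual register matches what cross-block users expect.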
9411   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
9412                               FuncInfo.PreferredExtendType.end())
9413                                  ? ISD::ANY_EXTEND
9414                                  : FuncInfo.PreferredExtendType[V];
9415   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
9416   PendingExports.push_back(Chain);
9417 }
9418 
9419 #include "llvm/CodeGen/SelectionDAGISel.h"
9420 
9421 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
9422 /// entry block, return true.  This includes arguments used by switches, since
9423 /// the switch may expand into multiple basic blocks.
9424 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
9425   // With FastISel active, we may be splitting blocks, so force creation
9426   // of virtual registers for all non-dead arguments.
9427   if (FastISel)
9428     return A->use_empty();
9429 
9430   const BasicBlock &Entry = A->getParent()->front();
9431   for (const User *U : A->users())
9432     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
9433       return false;  // Use not in entry block.
9434 
9435   return true;
9436 }
9437 
9438 using ArgCopyElisionMapTy =
9439     DenseMap<const Argument *,
9440              std::pair<const AllocaInst *, const StoreInst *>>;
9441 
9442 /// Scan the entry block of the function in FuncInfo for arguments that look
9443 /// like copies into a local alloca. Record any copied arguments in
9444 /// ArgCopyElisionCandidates.
9445 static void
9446 findArgumentCopyElisionCandidates(const DataLayout &DL,
9447                                   FunctionLoweringInfo *FuncInfo,
9448                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
9449   // Record the state of every static alloca used in the entry block. Argument
9450   // allocas are all used in the entry block, so we need approximately as many
9451   // entries as we have arguments.
9452   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
9453   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
9454   unsigned NumArgs = FuncInfo->Fn->arg_size();
9455   StaticAllocas.reserve(NumArgs * 2);
9456 
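  // Return the tracked state for V if it is a static alloca we can reason
  // about, creating an Unknown entry the first time it is seen; otherwise
  // return null.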
9457   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
9458     if (!V)
9459       return nullptr;
9460     V = V->stripPointerCasts();
9461     const auto *AI = dyn_cast<AllocaInst>(V);
9462     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
9463       return nullptr;
9464     auto Iter = StaticAllocas.insert({AI, Unknown});
9465     return &Iter.first->second;
9466   };
9467 
9468   // Look for stores of arguments to static allocas. Look through bitcasts and
9469   // GEPs to handle type coercions, as long as the alloca is fully initialized
9470   // by the store. Any non-store use of an alloca escapes it and any subsequent
9471   // unanalyzed store might write it.
9472   // FIXME: Handle structs initialized with multiple stores.
9473   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
9474     // Look for stores, and handle non-store uses conservatively.
9475     const auto *SI = dyn_cast<StoreInst>(&I);
9476     if (!SI) {
9477       // We will look through cast uses, so ignore them completely.
9478       if (I.isCast())
9479         continue;
9480       // Ignore debug info intrinsics, they don't escape or store to allocas.
9481       if (isa<DbgInfoIntrinsic>(I))
9482         continue;
9483       // This is an unknown instruction. Assume it escapes or writes to all
9484       // static alloca operands.
9485       for (const Use &U : I.operands()) {
9486         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
9487           *Info = StaticAllocaInfo::Clobbered;
9488       }
9489       continue;
9490     }
9491 
9492     // If the stored value is a static alloca, mark it as escaped.
9493     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
9494       *Info = StaticAllocaInfo::Clobbered;
9495 
9496     // Check if the destination is a static alloca.
9497     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
9498     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
9499     if (!Info)
9500       continue;
9501     const AllocaInst *AI = cast<AllocaInst>(Dst);
9502 
9503     // Skip allocas that have been initialized or clobbered.
9504     if (*Info != StaticAllocaInfo::Unknown)
9505       continue;
9506 
9507     // Check if the stored value is an argument, and that this store fully
9508     // initializes the alloca. Don't elide copies from the same argument twice.
9509     const Value *Val = SI->getValueOperand()->stripPointerCasts();
9510     const auto *Arg = dyn_cast<Argument>(Val);
9511     if (!Arg || Arg->hasPassPointeeByValueAttr() ||
9512         Arg->getType()->isEmptyTy() ||
9513         DL.getTypeStoreSize(Arg->getType()) !=
9514             DL.getTypeAllocSize(AI->getAllocatedType()) ||
9515         ArgCopyElisionCandidates.count(Arg)) {
9516       *Info = StaticAllocaInfo::Clobbered;
9517       continue;
9518     }
9519 
9520     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
9521                       << '\n');
9522 
9523     // Mark this alloca and store for argument copy elision.
9524     *Info = StaticAllocaInfo::Elidable;
9525     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
9526 
9527     // Stop scanning if we've seen all arguments. This will happen early in -O0
9528     // builds, which is useful, because -O0 builds have large entry blocks and
9529     // many allocas.
9530     if (ArgCopyElisionCandidates.size() == NumArgs)
9531       break;
9532   }
9533 }
9534 
9535 /// Try to elide argument copies from memory into a local alloca. Succeeds if
9536 /// ArgVal is a load from a suitable fixed stack object.
9537 static void tryToElideArgumentCopy(
9538     FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
9539     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
9540     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
9541     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
9542     SDValue ArgVal, bool &ArgHasUses) {
9543   // Check if this is a load from a fixed stack object.
9544   auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
9545   if (!LNode)
9546     return;
9547   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
9548   if (!FINode)
9549     return;
9550 
9551   // Check that the fixed stack object is the right size and alignment.
9552   // Look at the alignment that the user wrote on the alloca instead of looking
9553   // at the stack object.
9554   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
9555   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
9556   const AllocaInst *AI = ArgCopyIter->second.first;
9557   int FixedIndex = FINode->getIndex();
9558   int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
9559   int OldIndex = AllocaIndex;
9560   MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
9561   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
9562     LLVM_DEBUG(
9563         dbgs() << "  argument copy elision failed due to bad fixed stack "
9564                   "object size\n");
9565     return;
9566   }
9567   Align RequiredAlignment = AI->getAlign();
9568   if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
9569     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
9570                          "greater than stack argument alignment ("
9571                       << DebugStr(RequiredAlignment) << " vs "
9572                       << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
9573     return;
9574   }
9575 
9576   // Perform the elision. Delete the old stack object and replace its only use
9577   // in the variable info map. Mark the stack object as mutable.
9578   LLVM_DEBUG({
9579     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
9580            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
9581            << '\n';
9582   });
9583   MFI.RemoveStackObject(OldIndex);
9584   MFI.setIsImmutableObjectIndex(FixedIndex, false);
9585   AllocaIndex = FixedIndex;
9586   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
9587   Chains.push_back(ArgVal.getValue(1));
9588 
9589   // Avoid emitting code for the store implementing the copy.
9590   const StoreInst *SI = ArgCopyIter->second.second;
9591   ElidedArgCopyInstrs.insert(SI);
9592 
9593   // Check for uses of the argument again so that we can avoid exporting ArgVal
9594   // if it isn't used by anything other than the store.
9595   for (const Value *U : Arg.users()) {
9596     if (U != SI) {
9597       ArgHasUses = true;
9598       break;
9599     }
9600   }
9601 }
9602 
9603 void SelectionDAGISel::LowerArguments(const Function &F) {
9604   SelectionDAG &DAG = SDB->DAG;
9605   SDLoc dl = SDB->getCurSDLoc();
9606   const DataLayout &DL = DAG.getDataLayout();
9607   SmallVector<ISD::InputArg, 16> Ins;
9608 
9609   if (!FuncInfo->CanLowerReturn) {
9610     // Put in an sret pointer parameter before all the other parameters.
9611     SmallVector<EVT, 1> ValueVTs;
9612     ComputeValueVTs(*TLI, DAG.getDataLayout(),
9613                     F.getReturnType()->getPointerTo(
9614                         DAG.getDataLayout().getAllocaAddrSpace()),
9615                     ValueVTs);
9616 
9617     // NOTE: Assuming that a pointer will never break down into more than one
9618     // VT or more than one register.
9619     ISD::ArgFlagsTy Flags;
9620     Flags.setSRet();
9621     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
9622     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
9623                          ISD::InputArg::NoArgIndex, 0);
9624     Ins.push_back(RetArg);
9625   }
9626 
9627   // Look for stores of arguments to static allocas. Mark such arguments with a
9628   // flag to ask the target to give us the memory location of that argument if
9629   // available.
9630   ArgCopyElisionMapTy ArgCopyElisionCandidates;
9631   findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
9632                                     ArgCopyElisionCandidates);
9633 
9634   // Set up the incoming argument description vector.
9635   for (const Argument &Arg : F.args()) {
9636     unsigned ArgNo = Arg.getArgNo();
9637     SmallVector<EVT, 4> ValueVTs;
9638     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9639     bool isArgValueUsed = !Arg.use_empty();
9640     unsigned PartBase = 0;
9641     Type *FinalType = Arg.getType();
9642     if (Arg.hasAttribute(Attribute::ByVal))
9643       FinalType = Arg.getParamByValType();
9644     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
9645         FinalType, F.getCallingConv(), F.isVarArg());
9646     for (unsigned Value = 0, NumValues = ValueVTs.size();
9647          Value != NumValues; ++Value) {
9648       EVT VT = ValueVTs[Value];
9649       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
9650       ISD::ArgFlagsTy Flags;
9651 
9652       // Certain targets (such as MIPS) may have a different ABI alignment
9653       // for a type depending on the context. Give the target a chance to
9654       // specify the alignment it wants.
9655       const Align OriginalAlignment(
9656           TLI->getABIAlignmentForCallingConv(ArgTy, DL));
9657 
9658       if (Arg.getType()->isPointerTy()) {
9659         Flags.setPointer();
9660         Flags.setPointerAddrSpace(
9661             cast<PointerType>(Arg.getType())->getAddressSpace());
9662       }
9663       if (Arg.hasAttribute(Attribute::ZExt))
9664         Flags.setZExt();
9665       if (Arg.hasAttribute(Attribute::SExt))
9666         Flags.setSExt();
9667       if (Arg.hasAttribute(Attribute::InReg)) {
9668         // If we are using the vectorcall calling convention, a structure
9669         // that is passed InReg is surely an HVA (Homogeneous Vector Aggregate).
9670         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
9671             isa<StructType>(Arg.getType())) {
9672           // The first value of a structure is marked as the start of the HVA.
9673           if (0 == Value)
9674             Flags.setHvaStart();
9675           Flags.setHva();
9676         }
9677         // Set InReg Flag
9678         Flags.setInReg();
9679       }
9680       if (Arg.hasAttribute(Attribute::StructRet))
9681         Flags.setSRet();
9682       if (Arg.hasAttribute(Attribute::SwiftSelf))
9683         Flags.setSwiftSelf();
9684       if (Arg.hasAttribute(Attribute::SwiftError))
9685         Flags.setSwiftError();
9686       if (Arg.hasAttribute(Attribute::ByVal))
9687         Flags.setByVal();
9688       if (Arg.hasAttribute(Attribute::InAlloca)) {
9689         Flags.setInAlloca();
9690         // Set the byval flag for CCAssignFn callbacks that don't know about
9691         // inalloca.  This way we can know how many bytes we should've allocated
9692         // and how many bytes a callee cleanup function will pop.  If we port
9693         // inalloca to more targets, we'll have to add custom inalloca handling
9694         // in the various CC lowering callbacks.
9695         Flags.setByVal();
9696       }
9697       if (Arg.hasAttribute(Attribute::Preallocated)) {
9698         Flags.setPreallocated();
9699         // Set the byval flag for CCAssignFn callbacks that don't know about
9700         // preallocated.  This way we can know how many bytes we should've
9701         // allocated and how many bytes a callee cleanup function will pop.  If
9702         // we port preallocated to more targets, we'll have to add custom
9703         // preallocated handling in the various CC lowering callbacks.
9704         Flags.setByVal();
9705       }
9706       if (F.getCallingConv() == CallingConv::X86_INTR) {
9707         // x86 interrupt handlers receive the frame (1st parameter) by value on the stack.
9708         if (ArgNo == 0)
9709           Flags.setByVal();
9710       }
9711       if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
9712         Type *ElementTy = Arg.getParamByValType();
9713 
9714         // For ByVal, size and alignment should be passed from FE.  BE will
9715         // guess if this info is not there but there are cases it cannot get
9716         // right.
9717         unsigned FrameSize = DL.getTypeAllocSize(Arg.getParamByValType());
9718         Flags.setByValSize(FrameSize);
9719 
9720         unsigned FrameAlign;
9721         if (Arg.getParamAlignment())
9722           FrameAlign = Arg.getParamAlignment();
9723         else
9724           FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
9725         Flags.setByValAlign(Align(FrameAlign));
9726       }
9727       if (Arg.hasAttribute(Attribute::Nest))
9728         Flags.setNest();
9729       if (NeedsRegBlock)
9730         Flags.setInConsecutiveRegs();
9731       Flags.setOrigAlign(OriginalAlignment);
9732       if (ArgCopyElisionCandidates.count(&Arg))
9733         Flags.setCopyElisionCandidate();
9734       if (Arg.hasAttribute(Attribute::Returned))
9735         Flags.setReturned();
9736 
9737       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
9738           *CurDAG->getContext(), F.getCallingConv(), VT);
9739       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
9740           *CurDAG->getContext(), F.getCallingConv(), VT);
9741       for (unsigned i = 0; i != NumRegs; ++i) {
9742         // For scalable vectors, use the minimum size; individual targets
9743         // are responsible for handling scalable vector arguments and
9744         // return values.
9745         ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
9746                  ArgNo, PartBase+i*RegisterVT.getStoreSize().getKnownMinSize());
9747         if (NumRegs > 1 && i == 0)
9748           MyFlags.Flags.setSplit();
9749         // If it isn't the first piece, the alignment must be 1.
9750         else if (i > 0) {
9751           MyFlags.Flags.setOrigAlign(Align(1));
9752           if (i == NumRegs - 1)
9753             MyFlags.Flags.setSplitEnd();
9754         }
9755         Ins.push_back(MyFlags);
9756       }
9757       if (NeedsRegBlock && Value == NumValues - 1)
9758         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
9759       PartBase += VT.getStoreSize().getKnownMinSize();
9760     }
9761   }
9762 
9763   // Call the target to set up the argument values.
9764   SmallVector<SDValue, 8> InVals;
9765   SDValue NewRoot = TLI->LowerFormalArguments(
9766       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
9767 
9768   // Verify that the target's LowerFormalArguments behaved as expected.
9769   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
9770          "LowerFormalArguments didn't return a valid chain!");
9771   assert(InVals.size() == Ins.size() &&
9772          "LowerFormalArguments didn't emit the correct number of values!");
9773   LLVM_DEBUG({
9774     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
9775       assert(InVals[i].getNode() &&
9776              "LowerFormalArguments emitted a null value!");
9777       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
9778              "LowerFormalArguments emitted a value with the wrong type!");
9779     }
9780   });
9781 
9782   // Update the DAG with the new chain value resulting from argument lowering.
9783   DAG.setRoot(NewRoot);
9784 
9785   // Set up the argument values.
9786   unsigned i = 0;
9787   if (!FuncInfo->CanLowerReturn) {
9788     // Create a virtual register for the sret pointer, and put in a copy
9789     // from the sret argument into it.
9790     SmallVector<EVT, 1> ValueVTs;
9791     ComputeValueVTs(*TLI, DAG.getDataLayout(),
9792                     F.getReturnType()->getPointerTo(
9793                         DAG.getDataLayout().getAllocaAddrSpace()),
9794                     ValueVTs);
9795     MVT VT = ValueVTs[0].getSimpleVT();
9796     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
9797     Optional<ISD::NodeType> AssertOp = None;
9798     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
9799                                         nullptr, F.getCallingConv(), AssertOp);
9800 
9801     MachineFunction& MF = SDB->DAG.getMachineFunction();
9802     MachineRegisterInfo& RegInfo = MF.getRegInfo();
9803     Register SRetReg =
9804         RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
9805     FuncInfo->DemoteRegister = SRetReg;
9806     NewRoot =
9807         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
9808     DAG.setRoot(NewRoot);
9809 
9810     // i indexes lowered arguments.  Bump it past the hidden sret argument.
9811     ++i;
9812   }
9813 
9814   SmallVector<SDValue, 4> Chains;
9815   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
9816   for (const Argument &Arg : F.args()) {
9817     SmallVector<SDValue, 4> ArgValues;
9818     SmallVector<EVT, 4> ValueVTs;
9819     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9820     unsigned NumValues = ValueVTs.size();
9821     if (NumValues == 0)
9822       continue;
9823 
9824     bool ArgHasUses = !Arg.use_empty();
9825 
9826     // Elide the copying store if the target loaded this argument from a
9827     // suitable fixed stack object.
9828     if (Ins[i].Flags.isCopyElisionCandidate()) {
9829       tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
9830                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
9831                              InVals[i], ArgHasUses);
9832     }
9833 
9834     // If this argument is unused, remember its value; it is used to
9835     // generate debugging information.
9836     bool isSwiftErrorArg =
9837         TLI->supportSwiftError() &&
9838         Arg.hasAttribute(Attribute::SwiftError);
9839     if (!ArgHasUses && !isSwiftErrorArg) {
9840       SDB->setUnusedArgValue(&Arg, InVals[i]);
9841 
9842       // Also remember any frame index for use in FastISel.
9843       if (FrameIndexSDNode *FI =
9844           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
9845         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9846     }
9847 
9848     for (unsigned Val = 0; Val != NumValues; ++Val) {
9849       EVT VT = ValueVTs[Val];
9850       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
9851                                                       F.getCallingConv(), VT);
9852       unsigned NumParts = TLI->getNumRegistersForCallingConv(
9853           *CurDAG->getContext(), F.getCallingConv(), VT);
9854 
9855       // Even an apparent 'unused' swifterror argument needs to be returned. So
9856       // we do generate a copy for it that can be used on return from the
9857       // function.
9858       if (ArgHasUses || isSwiftErrorArg) {
9859         Optional<ISD::NodeType> AssertOp;
9860         if (Arg.hasAttribute(Attribute::SExt))
9861           AssertOp = ISD::AssertSext;
9862         else if (Arg.hasAttribute(Attribute::ZExt))
9863           AssertOp = ISD::AssertZext;
9864 
9865         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
9866                                              PartVT, VT, nullptr,
9867                                              F.getCallingConv(), AssertOp));
9868       }
9869 
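      // Advance past this value's parts even when the argument is unused, so
      // that i stays in sync with InVals.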
9870       i += NumParts;
9871     }
9872 
9873     // We don't need to do anything else for unused arguments.
9874     if (ArgValues.empty())
9875       continue;
9876 
9877     // Note down frame index.
9878     if (FrameIndexSDNode *FI =
9879         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
9880       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9881 
9882     SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
9883                                      SDB->getCurSDLoc());
9884 
9885     SDB->setValue(&Arg, Res);
9886     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
9887       // We want to associate the argument with the frame index, among the
9888       // involved operands, that corresponds to the lowest address. The
9889       // getCopyFromParts function, called earlier, is swapping the order of
9890       // the operands to BUILD_PAIR depending on endianness. The result of
9891       // that swapping is that the least significant bits of the argument will
9892       // be in the first operand of the BUILD_PAIR node, and the most
9893       // significant bits will be in the second operand.
9894       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
9895       if (LoadSDNode *LNode =
9896           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
9897         if (FrameIndexSDNode *FI =
9898             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
9899           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9900     }
9901 
9902     // Analyses past this point are naive and don't expect an AssertZext node.
9903     if (Res.getOpcode() == ISD::AssertZext)
9904       Res = Res.getOperand(0);
9905 
9906     // Update the SwiftErrorVRegDefMap.
9907     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
9908       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9909       if (Register::isVirtualRegister(Reg))
9910         SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
9911                                    Reg);
9912     }
9913 
9914     // If this argument is live outside of the entry block, insert a copy from
9915     // wherever we got it to the vreg that other BB's will reference it as.
9916     if (Res.getOpcode() == ISD::CopyFromReg) {
9917       // If we can, though, try to skip creating an unnecessary vreg.
9918       // FIXME: This isn't very clean... it would be nice to make this more
9919       // general.
9920       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9921       if (Register::isVirtualRegister(Reg)) {
9922         FuncInfo->ValueMap[&Arg] = Reg;
9923         continue;
9924       }
9925     }
9926     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
9927       FuncInfo->InitializeRegForValue(&Arg);
9928       SDB->CopyToExportRegsIfNeeded(&Arg);
9929     }
9930   }
9931 
9932   if (!Chains.empty()) {
9933     Chains.push_back(NewRoot);
9934     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
9935   }
9936 
9937   DAG.setRoot(NewRoot);
9938 
9939   assert(i == InVals.size() && "Argument register count mismatch!");
9940 
9941   // If any argument copy elisions occurred and we have debug info, update the
9942   // stale frame indices used in the dbg.declare variable info table.
9943   MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
9944   if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
9945     for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
9946       auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
9947       if (I != ArgCopyElisionFrameIndexMap.end())
9948         VI.Slot = I->second;
9949     }
9950   }
9951 
9952   // Finally, if the target has anything special to do, allow it to do so.
9953   emitFunctionEntryCode();
9954 }
9955 
9956 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
9957 /// ensure constants are generated when needed.  Remember the virtual registers
9958 /// that need to be added to the Machine PHI nodes as input.  We cannot just
9959 /// directly add them, because expansion might result in multiple MBB's for one
9960 /// BB.  As such, the start of the BB might correspond to a different MBB than
9961 /// the end.
9962 void
9963 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
9964   const Instruction *TI = LLVMBB->getTerminator();
9965 
9966   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
9967 
9968   // Check PHI nodes in successors that expect a value to be available from this
9969   // block.
9970   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
9971     const BasicBlock *SuccBB = TI->getSuccessor(succ);
9972     if (!isa<PHINode>(SuccBB->begin())) continue;
9973     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
9974 
9975     // If this terminator has multiple identical successors (common for
9976     // switches), only handle each succ once.
9977     if (!SuccsHandled.insert(SuccMBB).second)
9978       continue;
9979 
9980     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
9981 
9982     // At this point we know that there is a 1-1 correspondence between LLVM PHI
9983     // nodes and Machine PHI nodes, but the incoming operands have not been
9984     // emitted yet.
9985     for (const PHINode &PN : SuccBB->phis()) {
9986       // Ignore dead PHIs.
9987       if (PN.use_empty())
9988         continue;
9989 
9990       // Skip empty types
9991       if (PN.getType()->isEmptyTy())
9992         continue;
9993 
9994       unsigned Reg;
9995       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
9996 
9997       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
9998         unsigned &RegOut = ConstantsOut[C];
9999         if (RegOut == 0) {
10000           RegOut = FuncInfo.CreateRegs(C);
10001           CopyValueToVirtualRegister(C, RegOut);
10002         }
10003         Reg = RegOut;
10004       } else {
10005         DenseMap<const Value *, Register>::iterator I =
10006           FuncInfo.ValueMap.find(PHIOp);
10007         if (I != FuncInfo.ValueMap.end())
10008           Reg = I->second;
10009         else {
10010           assert(isa<AllocaInst>(PHIOp) &&
10011                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
10012                  "Didn't codegen value into a register!??");
10013           Reg = FuncInfo.CreateRegs(PHIOp);
10014           CopyValueToVirtualRegister(PHIOp, Reg);
10015         }
10016       }
10017 
10018       // Remember that this register needs to be added to the machine PHI node as
10019       // the input for this MBB.
10020       SmallVector<EVT, 4> ValueVTs;
10021       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10022       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
10023       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
10024         EVT VT = ValueVTs[vti];
10025         unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
10026         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
10027           FuncInfo.PHINodesToUpdate.push_back(
10028               std::make_pair(&*MBBI++, Reg + i));
10029         Reg += NumRegisters;
10030       }
10031     }
10032   }
10033 
10034   ConstantsOut.clear();
10035 }
10036 
10037 /// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
10038 /// if SuccMBB is null.
10039 MachineBasicBlock *
10040 SelectionDAGBuilder::StackProtectorDescriptor::
10041 AddSuccessorMBB(const BasicBlock *BB,
10042                 MachineBasicBlock *ParentMBB,
10043                 bool IsLikely,
10044                 MachineBasicBlock *SuccMBB) {
10045   // If SuccBB has not been created yet, create it.
10046   if (!SuccMBB) {
10047     MachineFunction *MF = ParentMBB->getParent();
10048     MachineFunction::iterator BBI(ParentMBB);
10049     SuccMBB = MF->CreateMachineBasicBlock(BB);
10050     MF->insert(++BBI, SuccMBB);
10051   }
10052   // Add it as a successor of ParentMBB.
10053   ParentMBB->addSuccessor(
10054       SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
10055   return SuccMBB;
10056 }
10057 
10058 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
10059   MachineFunction::iterator I(MBB);
10060   if (++I == FuncInfo.MF->end())
10061     return nullptr;
10062   return &*I;
10063 }
10064 
10065 /// During lowering new call nodes can be created (such as memset, etc.).
10066 /// Those will become new roots of the current DAG, but complications arise
10067 /// when they are tail calls. In such cases, the call lowering will update
10068 /// the root, but the builder still needs to know that a tail call has been
10069 /// lowered in order to avoid generating an additional return.
10070 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
10071   // If the node is null, a tail call was lowered and no new root is needed.
10072   if (MaybeTC.getNode() != nullptr)
10073     DAG.setRoot(MaybeTC);
10074   else
10075     HasTailCall = true;
10076 }
10077 
10078 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
10079                                         MachineBasicBlock *SwitchMBB,
10080                                         MachineBasicBlock *DefaultMBB) {
10081   MachineFunction *CurMF = FuncInfo.MF;
10082   MachineBasicBlock *NextMBB = nullptr;
10083   MachineFunction::iterator BBI(W.MBB);
10084   if (++BBI != FuncInfo.MF->end())
10085     NextMBB = &*BBI;
10086 
10087   unsigned Size = W.LastCluster - W.FirstCluster + 1;
10088 
10089   BranchProbabilityInfo *BPI = FuncInfo.BPI;
10090 
10091   if (Size == 2 && W.MBB == SwitchMBB) {
10092     // If any two of the cases have the same destination, and if one value
10093     // is the same as the other, but has one bit unset that the other has set,
10094     // use bit manipulation to do two compares at once.  For example:
10095     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
10096     // TODO: This could be extended to merge any 2 cases in switches with 3
10097     // cases.
10098     // TODO: Handle cases where W.CaseBB != SwitchBB.
10099     CaseCluster &Small = *W.FirstCluster;
10100     CaseCluster &Big = *W.LastCluster;
10101 
10102     if (Small.Low == Small.High && Big.Low == Big.High &&
10103         Small.MBB == Big.MBB) {
10104       const APInt &SmallValue = Small.Low->getValue();
10105       const APInt &BigValue = Big.Low->getValue();
10106 
10107       // Check that there is only one bit different.
10108       APInt CommonBit = BigValue ^ SmallValue;
10109       if (CommonBit.isPowerOf2()) {
10110         SDValue CondLHS = getValue(Cond);
10111         EVT VT = CondLHS.getValueType();
10112         SDLoc DL = getCurSDLoc();
10113 
10114         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
10115                                  DAG.getConstant(CommonBit, DL, VT));
10116         SDValue Cond = DAG.getSetCC(
10117             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
10118             ISD::SETEQ);
10119 
10120         // Update successor info.
10121         // Both Small and Big will jump to Small.BB, so we sum up the
10122         // probabilities.
10123         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
10124         if (BPI)
10125           addSuccessorWithProb(
10126               SwitchMBB, DefaultMBB,
10127               // The default destination is the first successor in IR.
10128               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
10129         else
10130           addSuccessorWithProb(SwitchMBB, DefaultMBB);
10131 
10132         // Insert the true branch.
10133         SDValue BrCond =
10134             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
10135                         DAG.getBasicBlock(Small.MBB));
10136         // Insert the false branch.
10137         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
10138                              DAG.getBasicBlock(DefaultMBB));
10139 
10140         DAG.setRoot(BrCond);
10141         return;
10142       }
10143     }
10144   }
10145 
10146   if (TM.getOptLevel() != CodeGenOpt::None) {
10147     // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability, in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker, as clusters are guaranteed never to overlap.
10151     llvm::sort(W.FirstCluster, W.LastCluster + 1,
10152                [](const CaseCluster &a, const CaseCluster &b) {
10153       return a.Prob != b.Prob ?
10154              a.Prob > b.Prob :
10155              a.Low->getValue().slt(b.Low->getValue());
10156     });
10157 
10158     // Rearrange the case blocks so that the last one falls through if possible
10159     // without changing the order of probabilities.
10160     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
10161       --I;
10162       if (I->Prob > W.LastCluster->Prob)
10163         break;
10164       if (I->Kind == CC_Range && I->MBB == NextMBB) {
10165         std::swap(*I, *W.LastCluster);
10166         break;
10167       }
10168     }
10169   }
10170 
10171   // Compute total probability.
10172   BranchProbability DefaultProb = W.DefaultProb;
10173   BranchProbability UnhandledProbs = DefaultProb;
10174   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
10175     UnhandledProbs += I->Prob;
10176 
10177   MachineBasicBlock *CurMBB = W.MBB;
10178   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
10179     bool FallthroughUnreachable = false;
10180     MachineBasicBlock *Fallthrough;
10181     if (I == W.LastCluster) {
10182       // For the last cluster, fall through to the default destination.
10183       Fallthrough = DefaultMBB;
10184       FallthroughUnreachable = isa<UnreachableInst>(
10185           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
10186     } else {
10187       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
10188       CurMF->insert(BBI, Fallthrough);
10189       // Put Cond in a virtual register to make it available from the new blocks.
10190       ExportFromCurrentBlock(Cond);
10191     }
10192     UnhandledProbs -= I->Prob;
10193 
10194     switch (I->Kind) {
10195       case CC_JumpTable: {
10196         // FIXME: Optimize away range check based on pivot comparisons.
10197         JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
10198         SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
10199 
10200         // The jump block hasn't been inserted yet; insert it here.
10201         MachineBasicBlock *JumpMBB = JT->MBB;
10202         CurMF->insert(BBI, JumpMBB);
10203 
10204         auto JumpProb = I->Prob;
10205         auto FallthroughProb = UnhandledProbs;
10206 
10207         // If the default statement is a target of the jump table, we evenly
10208         // distribute the default probability to successors of CurMBB. Also
10209         // update the probability on the edge from JumpMBB to Fallthrough.
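        // For example, with DefaultProb of 1/4 and DefaultMBB among the jump
        // table's successors: the jump edge gains 1/8, the fallthrough edge
        // gives up 1/8, and the JumpMBB -> DefaultMBB edge is set to 1/8
        // before the successor probabilities are renormalized.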
10210         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
10211                                               SE = JumpMBB->succ_end();
10212              SI != SE; ++SI) {
10213           if (*SI == DefaultMBB) {
10214             JumpProb += DefaultProb / 2;
10215             FallthroughProb -= DefaultProb / 2;
10216             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
10217             JumpMBB->normalizeSuccProbs();
10218             break;
10219           }
10220         }
10221 
10222         if (FallthroughUnreachable) {
10223           // Skip the range check if the fallthrough block is unreachable.
10224           JTH->OmitRangeCheck = true;
10225         }
10226 
10227         if (!JTH->OmitRangeCheck)
10228           addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
10229         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
10230         CurMBB->normalizeSuccProbs();
10231 
        // The jump table header will be inserted into our current block; it
        // will do the range check and fall through to our fallthrough block.
10234         JTH->HeaderBB = CurMBB;
10235         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
10236 
10237         // If we're in the right place, emit the jump table header right now.
10238         if (CurMBB == SwitchMBB) {
10239           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
10240           JTH->Emitted = true;
10241         }
10242         break;
10243       }
10244       case CC_BitTests: {
10245         // FIXME: Optimize away range check based on pivot comparisons.
10246         BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
10247 
10248         // The bit test blocks haven't been inserted yet; insert them here.
10249         for (BitTestCase &BTC : BTB->Cases)
10250           CurMF->insert(BBI, BTC.ThisBB);
10251 
10252         // Fill in fields of the BitTestBlock.
10253         BTB->Parent = CurMBB;
10254         BTB->Default = Fallthrough;
10255 
10256         BTB->DefaultProb = UnhandledProbs;
10257         // If the cases in bit test don't form a contiguous range, we evenly
10258         // distribute the probability on the edge to Fallthrough to two
10259         // successors of CurMBB.
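        // For example, with DefaultProb of 1/4, the combined probability of
        // the bit-test cases grows by 1/8 and the probability of reaching the
        // default through the header drops by 1/8.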
10260         if (!BTB->ContiguousRange) {
10261           BTB->Prob += DefaultProb / 2;
10262           BTB->DefaultProb -= DefaultProb / 2;
10263         }
10264 
10265         if (FallthroughUnreachable) {
10266           // Skip the range check if the fallthrough block is unreachable.
10267           BTB->OmitRangeCheck = true;
10268         }
10269 
10270         // If we're in the right place, emit the bit test header right now.
10271         if (CurMBB == SwitchMBB) {
10272           visitBitTestHeader(*BTB, SwitchMBB);
10273           BTB->Emitted = true;
10274         }
10275         break;
10276       }
10277       case CC_Range: {
10278         const Value *RHS, *LHS, *MHS;
10279         ISD::CondCode CC;
10280         if (I->Low == I->High) {
10281           // Check Cond == I->Low.
10282           CC = ISD::SETEQ;
10283           LHS = Cond;
          RHS = I->Low;
10285           MHS = nullptr;
10286         } else {
10287           // Check I->Low <= Cond <= I->High.
10288           CC = ISD::SETLE;
10289           LHS = I->Low;
10290           MHS = Cond;
10291           RHS = I->High;
10292         }
10293 
10294         // If Fallthrough is unreachable, fold away the comparison.
10295         if (FallthroughUnreachable)
10296           CC = ISD::SETTRUE;
10297 
10298         // The false probability is the sum of all unhandled cases.
10299         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
10300                      getCurSDLoc(), I->Prob, UnhandledProbs);
10301 
10302         if (CurMBB == SwitchMBB)
10303           visitSwitchCase(CB, SwitchMBB);
10304         else
10305           SL->SwitchCases.push_back(CB);
10306 
10307         break;
10308       }
10309     }
10310     CurMBB = Fallthrough;
10311   }
10312 }
10313 
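/// Return the rank of CC within [First, Last], i.e. the number of clusters in
/// that range that would be ordered before CC by the probability-then-value
/// ordering used when sorting case clusters in lowerWorkItem.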
10314 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
10315                                               CaseClusterIt First,
10316                                               CaseClusterIt Last) {
10317   return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
10318     if (X.Prob != CC.Prob)
10319       return X.Prob > CC.Prob;
10320 
10321     // Ties are broken by comparing the case value.
10322     return X.Low->getValue().slt(CC.Low->getValue());
10323   });
10324 }
10325 
10326 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
10327                                         const SwitchWorkListItem &W,
10328                                         Value *Cond,
10329                                         MachineBasicBlock *SwitchMBB) {
10330   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
10331          "Clusters not sorted?");
10332 
10333   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
10334 
10335   // Balance the tree based on branch probabilities to create a near-optimal (in
10336   // terms of search time given key frequency) binary search tree. See e.g. Kurt
10337   // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
10338   CaseClusterIt LastLeft = W.FirstCluster;
10339   CaseClusterIt FirstRight = W.LastCluster;
10340   auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
10341   auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
10342 
10343   // Move LastLeft and FirstRight towards each other from opposite directions to
10344   // find a partitioning of the clusters which balances the probability on both
10345   // sides. If LeftProb and RightProb are equal, alternate which side is
10346   // taken to ensure 0-probability nodes are distributed evenly.
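  // For example, with cluster probabilities {0.1, 0.4, 0.2, 0.3} and a zero
  // default probability: LeftProb starts at 0.1 and RightProb at 0.3, so the
  // left side absorbs the 0.4 cluster (LeftProb = 0.5), the right side then
  // absorbs the 0.2 cluster (RightProb = 0.5), and the loop stops with the
  // partition {0.1, 0.4} | {0.2, 0.3}.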
10347   unsigned I = 0;
10348   while (LastLeft + 1 < FirstRight) {
10349     if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
10350       LeftProb += (++LastLeft)->Prob;
10351     else
10352       RightProb += (--FirstRight)->Prob;
10353     I++;
10354   }
10355 
10356   while (true) {
10357     // Our binary search tree differs from a typical BST in that ours can have up
10358     // to three values in each leaf. The pivot selection above doesn't take that
10359     // into account, which means the tree might require more nodes and be less
10360     // efficient. We compensate for this here.
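    // For example, splitting 5 clusters 1/4 leaves one side needing another
    // split while 2/3 would not, since up to 3 clusters fit in a single leaf;
    // moving one cluster across (when its rank allows) avoids the extra node.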
10361 
10362     unsigned NumLeft = LastLeft - W.FirstCluster + 1;
10363     unsigned NumRight = W.LastCluster - FirstRight + 1;
10364 
10365     if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
      // If one side has fewer than 3 clusters and the other has more than 3,
10367       // consider taking a cluster from the other side.
10368 
10369       if (NumLeft < NumRight) {
10370         // Consider moving the first cluster on the right to the left side.
10371         CaseCluster &CC = *FirstRight;
10372         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10373         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10374         if (LeftSideRank <= RightSideRank) {
10375           // Moving the cluster to the left does not demote it.
10376           ++LastLeft;
10377           ++FirstRight;
10378           continue;
10379         }
10380       } else {
10381         assert(NumRight < NumLeft);
10382         // Consider moving the last element on the left to the right side.
10383         CaseCluster &CC = *LastLeft;
10384         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10385         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10386         if (RightSideRank <= LeftSideRank) {
          // Moving the cluster to the right does not demote it.
10388           --LastLeft;
10389           --FirstRight;
10390           continue;
10391         }
10392       }
10393     }
10394     break;
10395   }
10396 
10397   assert(LastLeft + 1 == FirstRight);
10398   assert(LastLeft >= W.FirstCluster);
10399   assert(FirstRight <= W.LastCluster);
10400 
10401   // Use the first element on the right as pivot since we will make less-than
10402   // comparisons against it.
10403   CaseClusterIt PivotCluster = FirstRight;
10404   assert(PivotCluster > W.FirstCluster);
10405   assert(PivotCluster <= W.LastCluster);
10406 
10407   CaseClusterIt FirstLeft = W.FirstCluster;
10408   CaseClusterIt LastRight = W.LastCluster;
10409 
10410   const ConstantInt *Pivot = PivotCluster->Low;
10411 
10412   // New blocks will be inserted immediately after the current one.
10413   MachineFunction::iterator BBI(W.MBB);
10414   ++BBI;
10415 
10416   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
10417   // we can branch to its destination directly if it's squeezed exactly in
10418   // between the known lower bound and Pivot - 1.
10419   MachineBasicBlock *LeftMBB;
10420   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
10421       FirstLeft->Low == W.GE &&
10422       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
10423     LeftMBB = FirstLeft->MBB;
10424   } else {
10425     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10426     FuncInfo.MF->insert(BBI, LeftMBB);
10427     WorkList.push_back(
10428         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
10429     // Put Cond in a virtual register to make it available from the new blocks.
10430     ExportFromCurrentBlock(Cond);
10431   }
10432 
10433   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
10434   // single cluster, RHS.Low == Pivot, and we can branch to its destination
10435   // directly if RHS.High equals the current upper bound.
10436   MachineBasicBlock *RightMBB;
10437   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
10438       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
10439     RightMBB = FirstRight->MBB;
10440   } else {
10441     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10442     FuncInfo.MF->insert(BBI, RightMBB);
10443     WorkList.push_back(
10444         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
10445     // Put Cond in a virtual register to make it available from the new blocks.
10446     ExportFromCurrentBlock(Cond);
10447   }
10448 
10449   // Create the CaseBlock record that will be used to lower the branch.
10450   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
10451                getCurSDLoc(), LeftProb, RightProb);
10452 
10453   if (W.MBB == SwitchMBB)
10454     visitSwitchCase(CB, SwitchMBB);
10455   else
10456     SL->SwitchCases.push_back(CB);
10457 }
10458 
// Scale CaseProb after peeling a case with probability PeeledCaseProb from
// the switch statement.
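// For example, peeling a case with probability 0.8 leaves the remaining
// switch with total probability 0.2, so a remaining case that had probability
// 0.1 is rescaled to 0.1 / 0.2 = 0.5.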
static BranchProbability scaleCaseProbability(BranchProbability CaseProb,
                                              BranchProbability PeeledCaseProb) {
10463   if (PeeledCaseProb == BranchProbability::getOne())
10464     return BranchProbability::getZero();
10465   BranchProbability SwitchProb = PeeledCaseProb.getCompl();
10466 
10467   uint32_t Numerator = CaseProb.getNumerator();
10468   uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
10469   return BranchProbability(Numerator, std::max(Numerator, Denominator));
10470 }
10471 
10472 // Try to peel the top probability case if it exceeds the threshold.
10473 // Return current MachineBasicBlock for the switch statement if the peeling
10474 // does not occur.
10475 // If the peeling is performed, return the newly created MachineBasicBlock
10476 // for the peeled switch statement. Also update Clusters to remove the peeled
10477 // case. PeeledCaseProb is the BranchProbability for the peeled case.
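// For example, if SwitchPeelThreshold is 66, the most probable case cluster is
// peeled only when its probability is at least 66/100; it is then lowered
// first in SwitchMBB, and the rest of the switch is lowered in the new
// PeeledSwitchMBB with rescaled probabilities.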
10478 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
10479     const SwitchInst &SI, CaseClusterVector &Clusters,
10480     BranchProbability &PeeledCaseProb) {
10481   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  // Don't peel if there is only one cluster or if optimizing for size.
10483   if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
10484       TM.getOptLevel() == CodeGenOpt::None ||
10485       SwitchMBB->getParent()->getFunction().hasMinSize())
10486     return SwitchMBB;
10487 
10488   BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
10489   unsigned PeeledCaseIndex = 0;
10490   bool SwitchPeeled = false;
10491   for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
10492     CaseCluster &CC = Clusters[Index];
10493     if (CC.Prob < TopCaseProb)
10494       continue;
10495     TopCaseProb = CC.Prob;
10496     PeeledCaseIndex = Index;
10497     SwitchPeeled = true;
10498   }
10499   if (!SwitchPeeled)
10500     return SwitchMBB;
10501 
10502   LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
10503                     << TopCaseProb << "\n");
10504 
10505   // Record the MBB for the peeled switch statement.
10506   MachineFunction::iterator BBI(SwitchMBB);
10507   ++BBI;
10508   MachineBasicBlock *PeeledSwitchMBB =
10509       FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
10510   FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
10511 
10512   ExportFromCurrentBlock(SI.getCondition());
10513   auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
10514   SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
10515                           nullptr,   nullptr,      TopCaseProb.getCompl()};
10516   lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
10517 
10518   Clusters.erase(PeeledCaseIt);
10519   for (CaseCluster &CC : Clusters) {
10520     LLVM_DEBUG(
        dbgs() << "Scale the probability for one cluster, before scaling: "
               << CC.Prob << "\n");
    CC.Prob = scaleCaseProbability(CC.Prob, TopCaseProb);
10524     LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
10525   }
10526   PeeledCaseProb = TopCaseProb;
10527   return PeeledSwitchMBB;
10528 }
10529 
10530 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
10531   // Extract cases from the switch.
10532   BranchProbabilityInfo *BPI = FuncInfo.BPI;
10533   CaseClusterVector Clusters;
10534   Clusters.reserve(SI.getNumCases());
10535   for (auto I : SI.cases()) {
10536     MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
10537     const ConstantInt *CaseVal = I.getCaseValue();
10538     BranchProbability Prob =
10539         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
10540             : BranchProbability(1, SI.getNumCases() + 1);
10541     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
10542   }
10543 
10544   MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
10545 
10546   // Cluster adjacent cases with the same destination. We do this at all
10547   // optimization levels because it's cheap to do and will make codegen faster
10548   // if there are many clusters.
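  // For example, cases 0, 1 and 2 that all branch to the same block become a
  // single CC_Range cluster covering [0, 2].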
10549   sortAndRangeify(Clusters);
10550 
  // The branch probability of the peeled case.
10552   BranchProbability PeeledCaseProb = BranchProbability::getZero();
10553   MachineBasicBlock *PeeledSwitchMBB =
10554       peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
10555 
10556   // If there is only the default destination, jump there directly.
10557   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
10558   if (Clusters.empty()) {
10559     assert(PeeledSwitchMBB == SwitchMBB);
10560     SwitchMBB->addSuccessor(DefaultMBB);
10561     if (DefaultMBB != NextBlock(SwitchMBB)) {
10562       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
10563                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
10564     }
10565     return;
10566   }
10567 
10568   SL->findJumpTables(Clusters, &SI, DefaultMBB, DAG.getPSI(), DAG.getBFI());
10569   SL->findBitTestClusters(Clusters, &SI);
10570 
10571   LLVM_DEBUG({
10572     dbgs() << "Case clusters: ";
10573     for (const CaseCluster &C : Clusters) {
10574       if (C.Kind == CC_JumpTable)
10575         dbgs() << "JT:";
10576       if (C.Kind == CC_BitTests)
10577         dbgs() << "BT:";
10578 
10579       C.Low->getValue().print(dbgs(), true);
10580       if (C.Low != C.High) {
10581         dbgs() << '-';
10582         C.High->getValue().print(dbgs(), true);
10583       }
10584       dbgs() << ' ';
10585     }
10586     dbgs() << '\n';
10587   });
10588 
10589   assert(!Clusters.empty());
10590   SwitchWorkList WorkList;
10591   CaseClusterIt First = Clusters.begin();
10592   CaseClusterIt Last = Clusters.end() - 1;
10593   auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
  // Scale the branch probability for DefaultMBB if peeling occurs and
10595   // DefaultMBB is not replaced.
10596   if (PeeledCaseProb != BranchProbability::getZero() &&
10597       DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
    DefaultProb = scaleCaseProbability(DefaultProb, PeeledCaseProb);
10599   WorkList.push_back(
10600       {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
10601 
10602   while (!WorkList.empty()) {
10603     SwitchWorkListItem W = WorkList.back();
10604     WorkList.pop_back();
10605     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
10606 
10607     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
10608         !DefaultMBB->getParent()->getFunction().hasMinSize()) {
10609       // For optimized builds, lower large range as a balanced binary tree.
10610       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
10611       continue;
10612     }
10613 
10614     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
10615   }
10616 }
10617 
10618 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
10619   SmallVector<EVT, 4> ValueVTs;
10620   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
10621                   ValueVTs);
10622   unsigned NumValues = ValueVTs.size();
10623   if (NumValues == 0) return;
10624 
10625   SmallVector<SDValue, 4> Values(NumValues);
10626   SDValue Op = getValue(I.getOperand(0));
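  // Freeze each member value of an aggregate individually, then re-package
  // the frozen values so the result mirrors the layout computed by
  // ComputeValueVTs.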
10627 
10628   for (unsigned i = 0; i != NumValues; ++i)
10629     Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
10630                             SDValue(Op.getNode(), Op.getResNo() + i));
10631 
10632   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
10633                            DAG.getVTList(ValueVTs), Values));
10634 }
10635