1 //===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements routines for translating from LLVM IR into SelectionDAG IR.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "SelectionDAGBuilder.h"
14 #include "SDNodeDbgValue.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/BitVector.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/ADT/StringRef.h"
22 #include "llvm/ADT/Twine.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/BranchProbabilityInfo.h"
25 #include "llvm/Analysis/ConstantFolding.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/TargetLibraryInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/Analysis/VectorUtils.h"
31 #include "llvm/CodeGen/Analysis.h"
32 #include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
33 #include "llvm/CodeGen/CodeGenCommonISel.h"
34 #include "llvm/CodeGen/FunctionLoweringInfo.h"
35 #include "llvm/CodeGen/GCMetadata.h"
36 #include "llvm/CodeGen/ISDOpcodes.h"
37 #include "llvm/CodeGen/MachineBasicBlock.h"
38 #include "llvm/CodeGen/MachineFrameInfo.h"
39 #include "llvm/CodeGen/MachineFunction.h"
40 #include "llvm/CodeGen/MachineInstrBuilder.h"
41 #include "llvm/CodeGen/MachineInstrBundleIterator.h"
42 #include "llvm/CodeGen/MachineMemOperand.h"
43 #include "llvm/CodeGen/MachineModuleInfo.h"
44 #include "llvm/CodeGen/MachineOperand.h"
45 #include "llvm/CodeGen/MachineRegisterInfo.h"
46 #include "llvm/CodeGen/RuntimeLibcalls.h"
47 #include "llvm/CodeGen/SelectionDAG.h"
48 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
49 #include "llvm/CodeGen/StackMaps.h"
50 #include "llvm/CodeGen/SwiftErrorValueTracking.h"
51 #include "llvm/CodeGen/TargetFrameLowering.h"
52 #include "llvm/CodeGen/TargetInstrInfo.h"
53 #include "llvm/CodeGen/TargetOpcodes.h"
54 #include "llvm/CodeGen/TargetRegisterInfo.h"
55 #include "llvm/CodeGen/TargetSubtargetInfo.h"
56 #include "llvm/CodeGen/WinEHFuncInfo.h"
57 #include "llvm/IR/Argument.h"
58 #include "llvm/IR/Attributes.h"
59 #include "llvm/IR/BasicBlock.h"
60 #include "llvm/IR/CFG.h"
61 #include "llvm/IR/CallingConv.h"
62 #include "llvm/IR/Constant.h"
63 #include "llvm/IR/ConstantRange.h"
64 #include "llvm/IR/Constants.h"
65 #include "llvm/IR/DataLayout.h"
66 #include "llvm/IR/DebugInfo.h"
67 #include "llvm/IR/DebugInfoMetadata.h"
68 #include "llvm/IR/DerivedTypes.h"
69 #include "llvm/IR/DiagnosticInfo.h"
70 #include "llvm/IR/EHPersonalities.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GetElementPtrTypeIterator.h"
73 #include "llvm/IR/InlineAsm.h"
74 #include "llvm/IR/InstrTypes.h"
75 #include "llvm/IR/Instructions.h"
76 #include "llvm/IR/IntrinsicInst.h"
77 #include "llvm/IR/Intrinsics.h"
78 #include "llvm/IR/IntrinsicsAArch64.h"
79 #include "llvm/IR/IntrinsicsAMDGPU.h"
80 #include "llvm/IR/IntrinsicsWebAssembly.h"
81 #include "llvm/IR/LLVMContext.h"
82 #include "llvm/IR/Metadata.h"
83 #include "llvm/IR/Module.h"
84 #include "llvm/IR/Operator.h"
85 #include "llvm/IR/PatternMatch.h"
86 #include "llvm/IR/Statepoint.h"
87 #include "llvm/IR/Type.h"
88 #include "llvm/IR/User.h"
89 #include "llvm/IR/Value.h"
90 #include "llvm/MC/MCContext.h"
91 #include "llvm/Support/AtomicOrdering.h"
92 #include "llvm/Support/Casting.h"
93 #include "llvm/Support/CommandLine.h"
94 #include "llvm/Support/Compiler.h"
95 #include "llvm/Support/Debug.h"
96 #include "llvm/Support/MathExtras.h"
97 #include "llvm/Support/raw_ostream.h"
98 #include "llvm/Target/TargetIntrinsicInfo.h"
99 #include "llvm/Target/TargetMachine.h"
100 #include "llvm/Target/TargetOptions.h"
101 #include "llvm/TargetParser/Triple.h"
102 #include "llvm/Transforms/Utils/Local.h"
103 #include <cstddef>
104 #include <iterator>
105 #include <limits>
106 #include <optional>
107 #include <tuple>
108 
109 using namespace llvm;
110 using namespace PatternMatch;
111 using namespace SwitchCG;
112 
113 #define DEBUG_TYPE "isel"
114 
115 /// LimitFloatPrecision - Generate low-precision inline sequences for
116 /// some float libcalls (6, 8 or 12 bits).
117 static unsigned LimitFloatPrecision;
118 
119 static cl::opt<bool>
120     InsertAssertAlign("insert-assert-align", cl::init(true),
121                       cl::desc("Insert the experimental `assertalign` node."),
122                       cl::ReallyHidden);
123 
124 static cl::opt<unsigned, true>
125     LimitFPPrecision("limit-float-precision",
126                      cl::desc("Generate low-precision inline sequences "
127                               "for some float libcalls"),
128                      cl::location(LimitFloatPrecision), cl::Hidden,
129                      cl::init(0));
130 
131 static cl::opt<unsigned> SwitchPeelThreshold(
132     "switch-peel-threshold", cl::Hidden, cl::init(66),
133     cl::desc("Set the case probability threshold for peeling the case from a "
134              "switch statement. A value greater than 100 will void this "
135              "optimization"));
136 
137 // Limit the width of DAG chains. This is important in general to prevent
138 // DAG-based analysis from blowing up. For example, alias analysis and
139 // load clustering may not complete in reasonable time. It is difficult to
140 // recognize and avoid this situation within each individual analysis, and
141 // future analyses are likely to have the same behavior. Limiting DAG width is
142 // the safe approach and will be especially important with global DAGs.
143 //
144 // MaxParallelChains default is arbitrarily high to avoid affecting
145 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
146 // sequence over this should have been converted to llvm.memcpy by the
147 // frontend. It is easy to induce this behavior with .ll code such as:
148 // %buffer = alloca [4096 x i8]
149 // %data = load [4096 x i8]* %argPtr
150 // store [4096 x i8] %data, [4096 x i8]* %buffer
151 static const unsigned MaxParallelChains = 64;
152 
153 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
154                                       const SDValue *Parts, unsigned NumParts,
155                                       MVT PartVT, EVT ValueVT, const Value *V,
156                                       std::optional<CallingConv::ID> CC);
157 
158 /// getCopyFromParts - Create a value that contains the specified legal parts
159 /// combined into the value they represent.  If the parts combine to a type
160 /// larger than ValueVT then AssertOp can be used to specify whether the extra
161 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
162 /// (ISD::AssertSext).
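/// For example (illustrative): an i64 value passed as two i32 parts is
/// reassembled with BUILD_PAIR below, and when the assembled part is wider
/// than ValueVT an optional AssertZext/AssertSext is emitted before the
/// final TRUNCATE.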
163 static SDValue
164 getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
165                  unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
166                  std::optional<CallingConv::ID> CC = std::nullopt,
167                  std::optional<ISD::NodeType> AssertOp = std::nullopt) {
168   // Let the target assemble the parts if it wants to
169   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
170   if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
171                                                    PartVT, ValueVT, CC))
172     return Val;
173 
174   if (ValueVT.isVector())
175     return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
176                                   CC);
177 
178   assert(NumParts > 0 && "No parts to assemble!");
179   SDValue Val = Parts[0];
180 
181   if (NumParts > 1) {
182     // Assemble the value from multiple parts.
183     if (ValueVT.isInteger()) {
184       unsigned PartBits = PartVT.getSizeInBits();
185       unsigned ValueBits = ValueVT.getSizeInBits();
186 
187       // Assemble the power-of-2 part.
188       unsigned RoundParts = llvm::bit_floor(NumParts);
189       unsigned RoundBits = PartBits * RoundParts;
190       EVT RoundVT = RoundBits == ValueBits ?
191         ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
192       SDValue Lo, Hi;
193 
194       EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
195 
196       if (RoundParts > 2) {
197         Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
198                               PartVT, HalfVT, V);
199         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
200                               RoundParts / 2, PartVT, HalfVT, V);
201       } else {
202         Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
203         Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
204       }
205 
206       if (DAG.getDataLayout().isBigEndian())
207         std::swap(Lo, Hi);
208 
209       Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
210 
211       if (RoundParts < NumParts) {
212         // Assemble the trailing non-power-of-2 part.
213         unsigned OddParts = NumParts - RoundParts;
214         EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
215         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
216                               OddVT, V, CC);
217 
218         // Combine the round and odd parts.
219         Lo = Val;
220         if (DAG.getDataLayout().isBigEndian())
221           std::swap(Lo, Hi);
222         EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
223         Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
224         Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
225                          DAG.getConstant(Lo.getValueSizeInBits(), DL,
226                                          TLI.getShiftAmountTy(
227                                              TotalVT, DAG.getDataLayout())));
228         Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
229         Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
230       }
231     } else if (PartVT.isFloatingPoint()) {
232       // FP split into multiple FP parts (for ppcf128)
233       assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
234              "Unexpected split");
235       SDValue Lo, Hi;
236       Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
237       Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
238       if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
239         std::swap(Lo, Hi);
240       Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
241     } else {
242       // FP split into integer parts (soft fp)
243       assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
244              !PartVT.isVector() && "Unexpected split");
245       EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
246       Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
247     }
248   }
249 
250   // There is now one part, held in Val.  Correct it to match ValueVT.
251   // PartEVT is the type of the register class that holds the value.
252   // ValueVT is the type of the inline asm operation.
253   EVT PartEVT = Val.getValueType();
254 
255   if (PartEVT == ValueVT)
256     return Val;
257 
258   if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
259       ValueVT.bitsLT(PartEVT)) {
260     // For an FP value in an integer part, we need to truncate to the right
261     // width first.
262     PartEVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
263     Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
264   }
265 
266   // Handle types that have the same size.
267   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
268     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
269 
270   // Handle types with different sizes.
271   if (PartEVT.isInteger() && ValueVT.isInteger()) {
272     if (ValueVT.bitsLT(PartEVT)) {
273       // For a truncate, see if we have any information to
274       // indicate whether the truncated bits will always be
275       // zero or sign-extended.
276       if (AssertOp)
277         Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
278                           DAG.getValueType(ValueVT));
279       return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
280     }
281     return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
282   }
283 
284   if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
285     // FP_ROUNDs are always exact here.
286     if (ValueVT.bitsLT(Val.getValueType()))
287       return DAG.getNode(
288           ISD::FP_ROUND, DL, ValueVT, Val,
289           DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
290 
291     return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
292   }
293 
294   // Handle MMX to a narrower integer type by bitcasting MMX to integer and
295   // then truncating.
296   if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
297       ValueVT.bitsLT(PartEVT)) {
298     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
299     return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
300   }
301 
302   report_fatal_error("Unknown mismatch in getCopyFromParts!");
303 }
304 
305 static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
306                                               const Twine &ErrMsg) {
307   const Instruction *I = dyn_cast_or_null<Instruction>(V);
308   if (!V)
309     return Ctx.emitError(ErrMsg);
310 
311   const char *AsmError = ", possible invalid constraint for vector type";
312   if (const CallInst *CI = dyn_cast<CallInst>(I))
313     if (CI->isInlineAsm())
314       return Ctx.emitError(I, ErrMsg + AsmError);
315 
316   return Ctx.emitError(I, ErrMsg);
317 }
318 
319 /// getCopyFromPartsVector - Create a value that contains the specified legal
320 /// parts combined into the value they represent.  If the parts combine to a
321 /// type larger than ValueVT then AssertOp can be used to specify whether the
322 /// extra bits are known to be zero (ISD::AssertZext) or sign extended from
323 /// ValueVT (ISD::AssertSext).
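/// For example (illustrative): a <4 x i32> value returned in two v2i32
/// registers is rebuilt with CONCAT_VECTORS, while a <2 x float> value held
/// in a wider <4 x float> part is narrowed again with EXTRACT_SUBVECTOR.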
324 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
325                                       const SDValue *Parts, unsigned NumParts,
326                                       MVT PartVT, EVT ValueVT, const Value *V,
327                                       std::optional<CallingConv::ID> CallConv) {
328   assert(ValueVT.isVector() && "Not a vector value");
329   assert(NumParts > 0 && "No parts to assemble!");
330   const bool IsABIRegCopy = CallConv.has_value();
331 
332   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
333   SDValue Val = Parts[0];
334 
335   // Handle a multi-element vector.
336   if (NumParts > 1) {
337     EVT IntermediateVT;
338     MVT RegisterVT;
339     unsigned NumIntermediates;
340     unsigned NumRegs;
341 
342     if (IsABIRegCopy) {
343       NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
344           *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
345           NumIntermediates, RegisterVT);
346     } else {
347       NumRegs =
348           TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
349                                      NumIntermediates, RegisterVT);
350     }
351 
352     assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
353     NumParts = NumRegs; // Silence a compiler warning.
354     assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
355     assert(RegisterVT.getSizeInBits() ==
356            Parts[0].getSimpleValueType().getSizeInBits() &&
357            "Part type sizes don't match!");
358 
359     // Assemble the parts into intermediate operands.
360     SmallVector<SDValue, 8> Ops(NumIntermediates);
361     if (NumIntermediates == NumParts) {
362       // If the register was not expanded, truncate or copy the value,
363       // as appropriate.
364       for (unsigned i = 0; i != NumParts; ++i)
365         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
366                                   PartVT, IntermediateVT, V, CallConv);
367     } else if (NumParts > 0) {
368       // If the intermediate type was expanded, build the intermediate
369       // operands from the parts.
370       assert(NumParts % NumIntermediates == 0 &&
371              "Must expand into a divisible number of parts!");
372       unsigned Factor = NumParts / NumIntermediates;
373       for (unsigned i = 0; i != NumIntermediates; ++i)
374         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
375                                   PartVT, IntermediateVT, V, CallConv);
376     }
377 
378     // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
379     // intermediate operands.
380     EVT BuiltVectorTy =
381         IntermediateVT.isVector()
382             ? EVT::getVectorVT(
383                   *DAG.getContext(), IntermediateVT.getScalarType(),
384                   IntermediateVT.getVectorElementCount() * NumParts)
385             : EVT::getVectorVT(*DAG.getContext(),
386                                IntermediateVT.getScalarType(),
387                                NumIntermediates);
388     Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
389                                                 : ISD::BUILD_VECTOR,
390                       DL, BuiltVectorTy, Ops);
391   }
392 
393   // There is now one part, held in Val.  Correct it to match ValueVT.
394   EVT PartEVT = Val.getValueType();
395 
396   if (PartEVT == ValueVT)
397     return Val;
398 
399   if (PartEVT.isVector()) {
400     // Vector/Vector bitcast.
401     if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
402       return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
403 
404     // If the parts vector has more elements than the value vector, then we
405     // have a vector widening case (e.g. <2 x float> -> <4 x float>).
406     // Extract the elements we want.
407     if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
408       assert((PartEVT.getVectorElementCount().getKnownMinValue() >
409               ValueVT.getVectorElementCount().getKnownMinValue()) &&
410              (PartEVT.getVectorElementCount().isScalable() ==
411               ValueVT.getVectorElementCount().isScalable()) &&
412              "Cannot narrow, it would be a lossy transformation");
413       PartEVT =
414           EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
415                            ValueVT.getVectorElementCount());
416       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
417                         DAG.getVectorIdxConstant(0, DL));
418       if (PartEVT == ValueVT)
419         return Val;
420       if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
421         return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
422 
423       // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
424       if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
425         return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
426     }
427 
428     // Promoted vector extract
429     return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
430   }
431 
432   // Trivial bitcast if the types are the same size and the destination
433   // vector type is legal.
434   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
435       TLI.isTypeLegal(ValueVT))
436     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
437 
438   if (ValueVT.getVectorNumElements() != 1) {
439      // Certain ABIs require that vectors are passed as integers. If the
440      // vectors are the same size, this is an obvious bitcast.
441      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
442        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
443      } else if (ValueVT.bitsLT(PartEVT)) {
444        const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
445        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
446        // Drop the extra bits.
447        Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
448        return DAG.getBitcast(ValueVT, Val);
449      }
450 
451      diagnosePossiblyInvalidConstraint(
452          *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
453      return DAG.getUNDEF(ValueVT);
454   }
455 
456   // Handle cases such as i8 -> <1 x i1>
457   EVT ValueSVT = ValueVT.getVectorElementType();
458   if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
459     unsigned ValueSize = ValueSVT.getSizeInBits();
460     if (ValueSize == PartEVT.getSizeInBits()) {
461       Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
462     } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
463       // It's possible a scalar floating point type gets softened to integer and
464       // then promoted to a larger integer. If PartEVT is the larger integer
465       // we need to truncate it and then bitcast to the FP type.
466       assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
467       EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
468       Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
469       Val = DAG.getBitcast(ValueSVT, Val);
470     } else {
471       Val = ValueVT.isFloatingPoint()
472                 ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
473                 : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
474     }
475   }
476 
477   return DAG.getBuildVector(ValueVT, DL, Val);
478 }
479 
480 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
481                                  SDValue Val, SDValue *Parts, unsigned NumParts,
482                                  MVT PartVT, const Value *V,
483                                  std::optional<CallingConv::ID> CallConv);
484 
485 /// getCopyToParts - Create a series of nodes that contain the specified value
486 /// split into legal parts.  If the parts contain more bits than Val, then, for
487 /// integers, ExtendKind can be used to specify how to generate the extra bits.
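/// For example (illustrative): an i64 value copied into two i32 parts is
/// repeatedly bisected with EXTRACT_ELEMENT, and when a single part is wider
/// than Val (e.g. an i1 placed in an i32 register) ExtendKind selects how the
/// extra bits are produced.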
488 static void
489 getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
490                unsigned NumParts, MVT PartVT, const Value *V,
491                std::optional<CallingConv::ID> CallConv = std::nullopt,
492                ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
493   // Let the target split the parts if it wants to
494   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
495   if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
496                                       CallConv))
497     return;
498   EVT ValueVT = Val.getValueType();
499 
500   // Handle the vector case separately.
501   if (ValueVT.isVector())
502     return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
503                                 CallConv);
504 
505   unsigned OrigNumParts = NumParts;
506   assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
507          "Copying to an illegal type!");
508 
509   if (NumParts == 0)
510     return;
511 
512   assert(!ValueVT.isVector() && "Vector case handled elsewhere");
513   EVT PartEVT = PartVT;
514   if (PartEVT == ValueVT) {
515     assert(NumParts == 1 && "No-op copy with multiple parts!");
516     Parts[0] = Val;
517     return;
518   }
519 
520   unsigned PartBits = PartVT.getSizeInBits();
521   if (NumParts * PartBits > ValueVT.getSizeInBits()) {
522     // If the parts cover more bits than the value has, promote the value.
523     if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
524       assert(NumParts == 1 && "Do not know what to promote to!");
525       Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
526     } else {
527       if (ValueVT.isFloatingPoint()) {
528         // FP values need to be bitcast, then extended if they are being put
529         // into a larger container.
530         ValueVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
531         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
532       }
533       assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
534              ValueVT.isInteger() &&
535              "Unknown mismatch!");
536       ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
537       Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
538       if (PartVT == MVT::x86mmx)
539         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
540     }
541   } else if (PartBits == ValueVT.getSizeInBits()) {
542     // Different types of the same size.
543     assert(NumParts == 1 && PartEVT != ValueVT);
544     Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
545   } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
546     // If the parts cover fewer bits than the value has, truncate the value.
547     assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
548            ValueVT.isInteger() &&
549            "Unknown mismatch!");
550     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
551     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
552     if (PartVT == MVT::x86mmx)
553       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
554   }
555 
556   // The value may have changed - recompute ValueVT.
557   ValueVT = Val.getValueType();
558   assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
559          "Failed to tile the value with PartVT!");
560 
561   if (NumParts == 1) {
562     if (PartEVT != ValueVT) {
563       diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
564                                         "scalar-to-vector conversion failed");
565       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
566     }
567 
568     Parts[0] = Val;
569     return;
570   }
571 
572   // Expand the value into multiple parts.
573   if (NumParts & (NumParts - 1)) {
574     // The number of parts is not a power of 2.  Split off and copy the tail.
575     assert(PartVT.isInteger() && ValueVT.isInteger() &&
576            "Do not know what to expand to!");
577     unsigned RoundParts = llvm::bit_floor(NumParts);
578     unsigned RoundBits = RoundParts * PartBits;
579     unsigned OddParts = NumParts - RoundParts;
580     SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
581       DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));
582 
583     getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
584                    CallConv);
585 
586     if (DAG.getDataLayout().isBigEndian())
587       // The odd parts were reversed by getCopyToParts - unreverse them.
588       std::reverse(Parts + RoundParts, Parts + NumParts);
589 
590     NumParts = RoundParts;
591     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
592     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
593   }
594 
595   // The number of parts is a power of 2.  Repeatedly bisect the value using
596   // EXTRACT_ELEMENT.
597   Parts[0] = DAG.getNode(ISD::BITCAST, DL,
598                          EVT::getIntegerVT(*DAG.getContext(),
599                                            ValueVT.getSizeInBits()),
600                          Val);
601 
602   for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
603     for (unsigned i = 0; i < NumParts; i += StepSize) {
604       unsigned ThisBits = StepSize * PartBits / 2;
605       EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
606       SDValue &Part0 = Parts[i];
607       SDValue &Part1 = Parts[i+StepSize/2];
608 
609       Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
610                           ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
611       Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
612                           ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
613 
614       if (ThisBits == PartBits && ThisVT != PartVT) {
615         Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
616         Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
617       }
618     }
619   }
620 
621   if (DAG.getDataLayout().isBigEndian())
622     std::reverse(Parts, Parts + OrigNumParts);
623 }
624 
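/// widenVectorToPartType - Widen the vector Val to the vector type PartVT,
/// either by inserting it into a larger undef vector (scalable vectors) or by
/// padding it with undef elements (fixed-width vectors). Returns an empty
/// SDValue if this kind of widening is not supported.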
625 static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
626                                      const SDLoc &DL, EVT PartVT) {
627   if (!PartVT.isVector())
628     return SDValue();
629 
630   EVT ValueVT = Val.getValueType();
631   EVT PartEVT = PartVT.getVectorElementType();
632   EVT ValueEVT = ValueVT.getVectorElementType();
633   ElementCount PartNumElts = PartVT.getVectorElementCount();
634   ElementCount ValueNumElts = ValueVT.getVectorElementCount();
635 
636   // We only support widening vectors with equivalent element types and
637   // fixed/scalable properties. If a target needs to widen a fixed-length type
638   // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
639   if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
640       PartNumElts.isScalable() != ValueNumElts.isScalable())
641     return SDValue();
642 
643   // Special-case bf16 because some targets share its ABI with fp16.
644   if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
645     assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
646            "Cannot widen to illegal type");
647     Val = DAG.getNode(ISD::BITCAST, DL,
648                       ValueVT.changeVectorElementType(MVT::f16), Val);
649   } else if (PartEVT != ValueEVT) {
650     return SDValue();
651   }
652 
653   // Widening a scalable vector to another scalable vector is done by inserting
654   // the vector into a larger undef one.
655   if (PartNumElts.isScalable())
656     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
657                        Val, DAG.getVectorIdxConstant(0, DL));
658 
659   // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
660   // undef elements.
661   SmallVector<SDValue, 16> Ops;
662   DAG.ExtractVectorElements(Val, Ops);
663   SDValue EltUndef = DAG.getUNDEF(PartEVT);
664   Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
665 
666   // FIXME: Use CONCAT for 2x -> 4x.
667   return DAG.getBuildVector(PartVT, DL, Ops);
668 }
669 
670 /// getCopyToPartsVector - Create a series of nodes that contain the specified
671 /// value split into legal parts.
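/// For example (illustrative): a <4 x i32> value copied into two v2i32 parts
/// is first adjusted to the built vector type (bitcast, promoted or widened
/// as needed) and then split with EXTRACT_SUBVECTOR, while a single wider
/// part such as <4 x float> for a <2 x float> value is produced by widening
/// with undef elements.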
672 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
673                                  SDValue Val, SDValue *Parts, unsigned NumParts,
674                                  MVT PartVT, const Value *V,
675                                  std::optional<CallingConv::ID> CallConv) {
676   EVT ValueVT = Val.getValueType();
677   assert(ValueVT.isVector() && "Not a vector");
678   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
679   const bool IsABIRegCopy = CallConv.has_value();
680 
681   if (NumParts == 1) {
682     EVT PartEVT = PartVT;
683     if (PartEVT == ValueVT) {
684       // Nothing to do.
685     } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
686       // Bitconvert vector->vector case.
687       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
688     } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
689       Val = Widened;
690     } else if (PartVT.isVector() &&
691                PartEVT.getVectorElementType().bitsGE(
692                    ValueVT.getVectorElementType()) &&
693                PartEVT.getVectorElementCount() ==
694                    ValueVT.getVectorElementCount()) {
695 
696       // Promoted vector extract
697       Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
698     } else if (PartEVT.isVector() &&
699                PartEVT.getVectorElementType() !=
700                    ValueVT.getVectorElementType() &&
701                TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
702                    TargetLowering::TypeWidenVector) {
703       // Combination of widening and promotion.
704       EVT WidenVT =
705           EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
706                            PartVT.getVectorElementCount());
707       SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
708       Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
709     } else {
710       // Don't extract an integer from a float vector. This can happen if the
711       // FP type gets softened to integer and then promoted. The promotion
712       // prevents it from being picked up by the earlier bitcast case.
713       if (ValueVT.getVectorElementCount().isScalar() &&
714           (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
715         Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
716                           DAG.getVectorIdxConstant(0, DL));
717       } else {
718         uint64_t ValueSize = ValueVT.getFixedSizeInBits();
719         assert(PartVT.getFixedSizeInBits() > ValueSize &&
720                "lossy conversion of vector to scalar type");
721         EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
722         Val = DAG.getBitcast(IntermediateType, Val);
723         Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
724       }
725     }
726 
727     assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
728     Parts[0] = Val;
729     return;
730   }
731 
732   // Handle a multi-element vector.
733   EVT IntermediateVT;
734   MVT RegisterVT;
735   unsigned NumIntermediates;
736   unsigned NumRegs;
737   if (IsABIRegCopy) {
738     NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
739         *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
740         RegisterVT);
741   } else {
742     NumRegs =
743         TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
744                                    NumIntermediates, RegisterVT);
745   }
746 
747   assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
748   NumParts = NumRegs; // Silence a compiler warning.
749   assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
750 
751   assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
752          "Mixing scalable and fixed vectors when copying in parts");
753 
754   std::optional<ElementCount> DestEltCnt;
755 
756   if (IntermediateVT.isVector())
757     DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
758   else
759     DestEltCnt = ElementCount::getFixed(NumIntermediates);
760 
761   EVT BuiltVectorTy = EVT::getVectorVT(
762       *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);
763 
764   if (ValueVT == BuiltVectorTy) {
765     // Nothing to do.
766   } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
767     // Bitconvert vector->vector case.
768     Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
769   } else {
770     if (BuiltVectorTy.getVectorElementType().bitsGT(
771             ValueVT.getVectorElementType())) {
772       // Integer promotion.
773       ValueVT = EVT::getVectorVT(*DAG.getContext(),
774                                  BuiltVectorTy.getVectorElementType(),
775                                  ValueVT.getVectorElementCount());
776       Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
777     }
778 
779     if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
780       Val = Widened;
781     }
782   }
783 
784   assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");
785 
786   // Split the vector into intermediate operands.
787   SmallVector<SDValue, 8> Ops(NumIntermediates);
788   for (unsigned i = 0; i != NumIntermediates; ++i) {
789     if (IntermediateVT.isVector()) {
790       // This does something sensible for scalable vectors - see the
791       // definition of EXTRACT_SUBVECTOR for further details.
792       unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
793       Ops[i] =
794           DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
795                       DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
796     } else {
797       Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
798                            DAG.getVectorIdxConstant(i, DL));
799     }
800   }
801 
802   // Split the intermediate operands into legal parts.
803   if (NumParts == NumIntermediates) {
804     // If the register was not expanded, promote or copy the value,
805     // as appropriate.
806     for (unsigned i = 0; i != NumParts; ++i)
807       getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
808   } else if (NumParts > 0) {
809     // If the intermediate type was expanded, split each intermediate
810     // operand into legal parts.
811     assert(NumIntermediates != 0 && "division by zero");
812     assert(NumParts % NumIntermediates == 0 &&
813            "Must expand into a divisible number of parts!");
814     unsigned Factor = NumParts / NumIntermediates;
815     for (unsigned i = 0; i != NumIntermediates; ++i)
816       getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
817                      CallConv);
818   }
819 }
820 
821 RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
822                            EVT valuevt, std::optional<CallingConv::ID> CC)
823     : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
824       RegCount(1, regs.size()), CallConv(CC) {}
825 
826 RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
827                            const DataLayout &DL, unsigned Reg, Type *Ty,
828                            std::optional<CallingConv::ID> CC) {
829   ComputeValueVTs(TLI, DL, Ty, ValueVTs);
830 
831   CallConv = CC;
832 
833   for (EVT ValueVT : ValueVTs) {
834     unsigned NumRegs =
835         isABIMangled()
836             ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
837             : TLI.getNumRegisters(Context, ValueVT);
838     MVT RegisterVT =
839         isABIMangled()
840             ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
841             : TLI.getRegisterType(Context, ValueVT);
842     for (unsigned i = 0; i != NumRegs; ++i)
843       Regs.push_back(Reg + i);
844     RegVTs.push_back(RegisterVT);
845     RegCount.push_back(NumRegs);
846     Reg += NumRegs;
847   }
848 }
849 
850 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
851                                       FunctionLoweringInfo &FuncInfo,
852                                       const SDLoc &dl, SDValue &Chain,
853                                       SDValue *Glue, const Value *V) const {
854   // A Value with type {} or [0 x %t] needs no registers.
855   if (ValueVTs.empty())
856     return SDValue();
857 
858   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
859 
860   // Assemble the legal parts into the final values.
861   SmallVector<SDValue, 4> Values(ValueVTs.size());
862   SmallVector<SDValue, 8> Parts;
863   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
864     // Copy the legal parts from the registers.
865     EVT ValueVT = ValueVTs[Value];
866     unsigned NumRegs = RegCount[Value];
867     MVT RegisterVT = isABIMangled()
868                          ? TLI.getRegisterTypeForCallingConv(
869                                *DAG.getContext(), *CallConv, RegVTs[Value])
870                          : RegVTs[Value];
871 
872     Parts.resize(NumRegs);
873     for (unsigned i = 0; i != NumRegs; ++i) {
874       SDValue P;
875       if (!Glue) {
876         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
877       } else {
878         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
879         *Glue = P.getValue(2);
880       }
881 
882       Chain = P.getValue(1);
883       Parts[i] = P;
884 
885       // If the source register was virtual and if we know something about it,
886       // add an assert node.
887       if (!Register::isVirtualRegister(Regs[Part + i]) ||
888           !RegisterVT.isInteger())
889         continue;
890 
891       const FunctionLoweringInfo::LiveOutInfo *LOI =
892         FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
893       if (!LOI)
894         continue;
895 
896       unsigned RegSize = RegisterVT.getScalarSizeInBits();
897       unsigned NumSignBits = LOI->NumSignBits;
898       unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
899 
900       if (NumZeroBits == RegSize) {
901         // The current value is a zero.
902         // Explicitly express that so it is easier for
903         // optimizations to kick in.
904         Parts[i] = DAG.getConstant(0, dl, RegisterVT);
905         continue;
906       }
907 
908       // FIXME: We capture more information than the dag can represent.  For
909       // now, just use the tightest assertzext/assertsext possible.
910       bool isSExt;
911       EVT FromVT(MVT::Other);
912       if (NumZeroBits) {
913         FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
914         isSExt = false;
915       } else if (NumSignBits > 1) {
916         FromVT =
917             EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
918         isSExt = true;
919       } else {
920         continue;
921       }
922       // Add an assertion node.
923       assert(FromVT != MVT::Other);
924       Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
925                              RegisterVT, P, DAG.getValueType(FromVT));
926     }
927 
928     Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
929                                      RegisterVT, ValueVT, V, CallConv);
930     Part += NumRegs;
931     Parts.clear();
932   }
933 
934   return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
935 }
936 
937 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
938                                  const SDLoc &dl, SDValue &Chain, SDValue *Glue,
939                                  const Value *V,
940                                  ISD::NodeType PreferredExtendType) const {
941   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
942   ISD::NodeType ExtendKind = PreferredExtendType;
943 
944   // Get the list of the value's legal parts.
945   unsigned NumRegs = Regs.size();
946   SmallVector<SDValue, 8> Parts(NumRegs);
947   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
948     unsigned NumParts = RegCount[Value];
949 
950     MVT RegisterVT = isABIMangled()
951                          ? TLI.getRegisterTypeForCallingConv(
952                                *DAG.getContext(), *CallConv, RegVTs[Value])
953                          : RegVTs[Value];
954 
955     if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
956       ExtendKind = ISD::ZERO_EXTEND;
957 
958     getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
959                    NumParts, RegisterVT, V, CallConv, ExtendKind);
960     Part += NumParts;
961   }
962 
963   // Copy the parts into the registers.
964   SmallVector<SDValue, 8> Chains(NumRegs);
965   for (unsigned i = 0; i != NumRegs; ++i) {
966     SDValue Part;
967     if (!Glue) {
968       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
969     } else {
970       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
971       *Glue = Part.getValue(1);
972     }
973 
974     Chains[i] = Part.getValue(0);
975   }
976 
977   if (NumRegs == 1 || Glue)
978     // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
979     // flagged to it. That is, the CopyToReg nodes and the user are considered
980     // a single scheduling unit. If we create a TokenFactor and return it as
981     // chain, then the TokenFactor is both a predecessor (operand) of the
982     // user as well as a successor (the TF operands are flagged to the user).
983     // c1, f1 = CopyToReg
984     // c2, f2 = CopyToReg
985     // c3     = TokenFactor c1, c2
986     // ...
987     //        = op c3, ..., f2
988     Chain = Chains[NumRegs-1];
989   else
990     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
991 }
992 
993 void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
994                                         unsigned MatchingIdx, const SDLoc &dl,
995                                         SelectionDAG &DAG,
996                                         std::vector<SDValue> &Ops) const {
997   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
998 
999   InlineAsm::Flag Flag(Code, Regs.size());
1000   if (HasMatching)
1001     Flag.setMatchingOp(MatchingIdx);
1002   else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
1003     // Put the register class of the virtual registers in the flag word.  That
1004     // way, later passes can recompute register class constraints for inline
1005     // assembly as well as normal instructions.
1006     // Don't do this for tied operands that can use the regclass information
1007     // from the def.
1008     const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1009     const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
1010     Flag.setRegClass(RC->getID());
1011   }
1012 
1013   SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
1014   Ops.push_back(Res);
1015 
1016   if (Code == InlineAsm::Kind::Clobber) {
1017     // Clobbers should always have a 1:1 mapping with registers, and may
1018     // reference registers that have illegal (e.g. vector) types. Hence, we
1019     // shouldn't try to apply any sort of splitting logic to them.
1020     assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
1021            "No 1:1 mapping from clobbers to regs?");
1022     Register SP = TLI.getStackPointerRegisterToSaveRestore();
1023     (void)SP;
1024     for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
1025       Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
1026       assert(
1027           (Regs[I] != SP ||
1028            DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
1029           "If we clobbered the stack pointer, MFI should know about it.");
1030     }
1031     return;
1032   }
1033 
1034   for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
1035     MVT RegisterVT = RegVTs[Value];
1036     unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
1037                                            RegisterVT);
1038     for (unsigned i = 0; i != NumRegs; ++i) {
1039       assert(Reg < Regs.size() && "Mismatch in # registers expected");
1040       unsigned TheReg = Regs[Reg++];
1041       Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
1042     }
1043   }
1044 }
1045 
1046 SmallVector<std::pair<unsigned, TypeSize>, 4>
1047 RegsForValue::getRegsAndSizes() const {
1048   SmallVector<std::pair<unsigned, TypeSize>, 4> OutVec;
1049   unsigned I = 0;
1050   for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
1051     unsigned RegCount = std::get<0>(CountAndVT);
1052     MVT RegisterVT = std::get<1>(CountAndVT);
1053     TypeSize RegisterSize = RegisterVT.getSizeInBits();
1054     for (unsigned E = I + RegCount; I != E; ++I)
1055       OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
1056   }
1057   return OutVec;
1058 }
1059 
1060 void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
1061                                AssumptionCache *ac,
1062                                const TargetLibraryInfo *li) {
1063   AA = aa;
1064   AC = ac;
1065   GFI = gfi;
1066   LibInfo = li;
1067   Context = DAG.getContext();
1068   LPadToCallSiteMap.clear();
1069   SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
1070   AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
1071       *DAG.getMachineFunction().getFunction().getParent());
1072 }
1073 
1074 void SelectionDAGBuilder::clear() {
1075   NodeMap.clear();
1076   UnusedArgNodeMap.clear();
1077   PendingLoads.clear();
1078   PendingExports.clear();
1079   PendingConstrainedFP.clear();
1080   PendingConstrainedFPStrict.clear();
1081   CurInst = nullptr;
1082   HasTailCall = false;
1083   SDNodeOrder = LowestSDNodeOrder;
1084   StatepointLowering.clear();
1085 }
1086 
1087 void SelectionDAGBuilder::clearDanglingDebugInfo() {
1088   DanglingDebugInfoMap.clear();
1089 }
1090 
1091 // Update DAG root to include dependencies on Pending chains.
1092 SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
1093   SDValue Root = DAG.getRoot();
1094 
1095   if (Pending.empty())
1096     return Root;
1097 
1098   // Add the current root to Pending, unless we already indirectly
1099   // depend on it.
1100   if (Root.getOpcode() != ISD::EntryToken) {
1101     unsigned i = 0, e = Pending.size();
1102     for (; i != e; ++i) {
1103       assert(Pending[i].getNode()->getNumOperands() > 1);
1104       if (Pending[i].getNode()->getOperand(0) == Root)
1105         break;  // Don't add the root if we already indirectly depend on it.
1106     }
1107 
1108     if (i == e)
1109       Pending.push_back(Root);
1110   }
1111 
1112   if (Pending.size() == 1)
1113     Root = Pending[0];
1114   else
1115     Root = DAG.getTokenFactor(getCurSDLoc(), Pending);
1116 
1117   DAG.setRoot(Root);
1118   Pending.clear();
1119   return Root;
1120 }
1121 
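// getMemoryRoot - Return the current root of the DAG, first folding all
// pending loads into it so that later memory operations are chained after
// them.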
1122 SDValue SelectionDAGBuilder::getMemoryRoot() {
1123   return updateRoot(PendingLoads);
1124 }
1125 
1126 SDValue SelectionDAGBuilder::getRoot() {
1127   // Chain up all pending constrained intrinsics together with all
1128   // pending loads, by simply appending them to PendingLoads and
1129   // then calling getMemoryRoot().
1130   PendingLoads.reserve(PendingLoads.size() +
1131                        PendingConstrainedFP.size() +
1132                        PendingConstrainedFPStrict.size());
1133   PendingLoads.append(PendingConstrainedFP.begin(),
1134                       PendingConstrainedFP.end());
1135   PendingLoads.append(PendingConstrainedFPStrict.begin(),
1136                       PendingConstrainedFPStrict.end());
1137   PendingConstrainedFP.clear();
1138   PendingConstrainedFPStrict.clear();
1139   return getMemoryRoot();
1140 }
1141 
1142 SDValue SelectionDAGBuilder::getControlRoot() {
1143   // We need to emit pending fpexcept.strict constrained intrinsics,
1144   // so append them to the PendingExports list.
1145   PendingExports.append(PendingConstrainedFPStrict.begin(),
1146                         PendingConstrainedFPStrict.end());
1147   PendingConstrainedFPStrict.clear();
1148   return updateRoot(PendingExports);
1149 }
1150 
1151 void SelectionDAGBuilder::visitDbgInfo(const Instruction &I) {
1152   // Add SDDbgValue nodes for any var locs here. Do so before updating
1153   // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
1154   if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
1157     for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
1158          It != End; ++It) {
1159       auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1160       dropDanglingDebugInfo(Var, It->Expr);
1161       if (It->Values.isKillLocation(It->Expr)) {
1162         handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
1163         continue;
1164       }
1165       SmallVector<Value *> Values(It->Values.location_ops());
1166       if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
1167                             It->Values.hasArgList())) {
1168         SmallVector<Value *, 4> Vals;
1169         for (Value *V : It->Values.location_ops())
1170           Vals.push_back(V);
1171         addDanglingDebugInfo(Vals,
1172                              FnVarLocs->getDILocalVariable(It->VariableID),
1173                              It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
1174       }
1175     }
1176   }
1177 
1178   // Handle any debug-info attached to this instruction, in the form of
1179   // DPValue non-instruction debug-info records.
1180   for (DPValue &DPV : I.getDbgValueRange()) {
1181     DILocalVariable *Variable = DPV.getVariable();
1182     DIExpression *Expression = DPV.getExpression();
1183     dropDanglingDebugInfo(Variable, Expression);
1184 
1185     // A DPValue with no locations is a kill location.
1186     SmallVector<Value *, 4> Values(DPV.location_ops());
1187     if (Values.empty()) {
1188       handleKillDebugValue(Variable, Expression, DPV.getDebugLoc(),
1189                            SDNodeOrder);
1190       continue;
1191     }
1192 
1193     // A DPValue with an undef or absent location is also a kill location.
1194     if (llvm::any_of(Values,
1195                      [](Value *V) { return !V || isa<UndefValue>(V); })) {
1196       handleKillDebugValue(Variable, Expression, DPV.getDebugLoc(),
1197                            SDNodeOrder);
1198       continue;
1199     }
1200 
1201     bool IsVariadic = DPV.hasArgList();
1202     if (!handleDebugValue(Values, Variable, Expression, DPV.getDebugLoc(),
1203                           SDNodeOrder, IsVariadic)) {
1204       addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
1205                            DPV.getDebugLoc(), SDNodeOrder);
1206     }
1207   }
1208 }
1209 
1210 void SelectionDAGBuilder::visit(const Instruction &I) {
1211   visitDbgInfo(I);
1212 
1213   // Set up outgoing PHI node register values before emitting the terminator.
1214   if (I.isTerminator()) {
1215     HandlePHINodesInSuccessorBlocks(I.getParent());
1216   }
1217 
1218   // Increase the SDNodeOrder if dealing with a non-debug instruction.
1219   if (!isa<DbgInfoIntrinsic>(I))
1220     ++SDNodeOrder;
1221 
1222   CurInst = &I;
1223 
1224   // Set inserted listener only if required.
1225   bool NodeInserted = false;
1226   std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1227   MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
1228   if (PCSectionsMD) {
1229     InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1230         DAG, [&](SDNode *) { NodeInserted = true; });
1231   }
1232 
1233   visit(I.getOpcode(), I);
1234 
1235   if (!I.isTerminator() && !HasTailCall &&
1236       !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
1237     CopyToExportRegsIfNeeded(&I);
1238 
1239   // Handle metadata.
1240   if (PCSectionsMD) {
1241     auto It = NodeMap.find(&I);
1242     if (It != NodeMap.end()) {
1243       DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1244     } else if (NodeInserted) {
1245       // This should not happen; if it does, don't let it go unnoticed so we can
1246       // fix it. Relevant visit*() function is probably missing a setValue().
1247       errs() << "warning: loosing !pcsections metadata ["
1248              << I.getModule()->getName() << "]\n";
1249       LLVM_DEBUG(I.dump());
1250       assert(false);
1251     }
1252   }
1253 
1254   CurInst = nullptr;
1255 }
1256 
1257 void SelectionDAGBuilder::visitPHI(const PHINode &) {
1258   llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1259 }
1260 
1261 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1262   // Note: this doesn't use InstVisitor, because it has to work with
1263   // ConstantExpr's in addition to instructions.
1264   switch (Opcode) {
1265   default: llvm_unreachable("Unknown instruction type encountered!");
1266     // Build the switch statement using the Instruction.def file.
1267 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1268     case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1269 #include "llvm/IR/Instruction.def"
1270   }
1271 }
1272 
1273 static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
1274                                             DILocalVariable *Variable,
1275                                             DebugLoc DL, unsigned Order,
1276                                             SmallVectorImpl<Value *> &Values,
1277                                             DIExpression *Expression) {
1278   // For variadic dbg_values we will now insert an undef.
1279   // FIXME: We can potentially recover these!
1280   SmallVector<SDDbgOperand, 2> Locs;
1281   for (const Value *V : Values) {
1282     auto *Undef = UndefValue::get(V->getType());
1283     Locs.push_back(SDDbgOperand::fromConst(Undef));
1284   }
1285   SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
1286                                         /*IsIndirect=*/false, DL, Order,
1287                                         /*IsVariadic=*/true);
1288   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1289   return true;
1290 }
1291 
1292 void SelectionDAGBuilder::addDanglingDebugInfo(SmallVectorImpl<Value *> &Values,
1293                                                DILocalVariable *Var,
1294                                                DIExpression *Expr,
1295                                                bool IsVariadic, DebugLoc DL,
1296                                                unsigned Order) {
1297   if (IsVariadic) {
1298     handleDanglingVariadicDebugInfo(DAG, Var, DL, Order, Values, Expr);
1299     return;
1300   }
1301   // TODO: Dangling debug info will eventually either be resolved or produce
1302   // an Undef DBG_VALUE. However in the resolution case, a gap may appear
1303   // between the original dbg.value location and its resolved DBG_VALUE,
1304   // which we should ideally fill with an extra Undef DBG_VALUE.
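       // For example (illustrative), after code motion a block may contain:
       //   call void @llvm.dbg.value(metadata i32 %x, ...)  ; %x has no SDNode yet
       //   ...
       //   %x = add i32 %a, %b
       // The dbg.value dangles here until the 'add' is visited and %x is lowered.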
1305   assert(Values.size() == 1);
1306   DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);
1307 }
1308 
1309 void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1310                                                 const DIExpression *Expr) {
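       // A new dbg.value for Variable invalidates earlier dangling entries whose
       // fragments overlap it; e.g. (illustrative) a fragment covering bits
       // [0, 32) overlaps one covering [16, 48). Matching entries are given a
       // final chance to be salvaged below, then dropped.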
1311   auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1312     DIVariable *DanglingVariable = DDI.getVariable();
1313     DIExpression *DanglingExpr = DDI.getExpression();
1314     if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1315       LLVM_DEBUG(dbgs() << "Dropping dangling debug info for "
1316                         << printDDI(nullptr, DDI) << "\n");
1317       return true;
1318     }
1319     return false;
1320   };
1321 
1322   for (auto &DDIMI : DanglingDebugInfoMap) {
1323     DanglingDebugInfoVector &DDIV = DDIMI.second;
1324 
1325     // If debug info is to be dropped, run it through final checks to see
1326     // whether it can be salvaged.
1327     for (auto &DDI : DDIV)
1328       if (isMatchingDbgValue(DDI))
1329         salvageUnresolvedDbgValue(DDIMI.first, DDI);
1330 
1331     erase_if(DDIV, isMatchingDbgValue);
1332   }
1333 }
1334 
1335 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1336 // generate the debug data structures now that we've seen its definition.
1337 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1338                                                    SDValue Val) {
1339   auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1340   if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1341     return;
1342 
1343   DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1344   for (auto &DDI : DDIV) {
1345     DebugLoc DL = DDI.getDebugLoc();
1346     unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1347     unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1348     DILocalVariable *Variable = DDI.getVariable();
1349     DIExpression *Expr = DDI.getExpression();
1350     assert(Variable->isValidLocationForIntrinsic(DL) &&
1351            "Expected inlined-at fields to agree");
1352     SDDbgValue *SDV;
1353     if (Val.getNode()) {
1354       // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1355       // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1356       // we couldn't resolve it directly when examining the DbgValue intrinsic
1357       // in the first place we should not be more successful here). Unless we
1358       // have some test case that proves this to be correct, we should avoid
1359       // calling EmitFuncArgumentDbgValue here.
1360       if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
1361                                     FuncArgumentDbgValueKind::Value, Val)) {
1362         LLVM_DEBUG(dbgs() << "Resolve dangling debug info for "
1363                           << printDDI(V, DDI) << "\n");
1364         LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
1365         // Increase the SDNodeOrder for the DbgValue here to make sure it is
1366         // inserted after the definition of Val when emitting the instructions
1367         // after ISel. An alternative could be to teach
1368         // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1369         LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1370                    << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1371                    << ValSDNodeOrder << "\n");
1372         SDV = getDbgValue(Val, Variable, Expr, DL,
1373                           std::max(DbgSDNodeOrder, ValSDNodeOrder));
1374         DAG.AddDbgValue(SDV, false);
1375       } else
1376         LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
1377                           << printDDI(V, DDI)
1378                           << " in EmitFuncArgumentDbgValue\n");
1379     } else {
1380       LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)
1381                         << "\n");
1382       auto Undef = UndefValue::get(V->getType());
1383       auto SDV =
1384           DAG.getConstantDbgValue(Variable, Expr, Undef, DL, DbgSDNodeOrder);
1385       DAG.AddDbgValue(SDV, false);
1386     }
1387   }
1388   DDIV.clear();
1389 }
1390 
1391 void SelectionDAGBuilder::salvageUnresolvedDbgValue(const Value *V,
1392                                                     DanglingDebugInfo &DDI) {
1393   // TODO: For the variadic implementation, instead of only checking the fail
1394   // state of `handleDebugValue`, we need to know specifically which values were
1395   // invalid, so that we attempt to salvage only those values when processing
1396   // a DIArgList.
1397   const Value *OrigV = V;
1398   DILocalVariable *Var = DDI.getVariable();
1399   DIExpression *Expr = DDI.getExpression();
1400   DebugLoc DL = DDI.getDebugLoc();
1401   unsigned SDOrder = DDI.getSDNodeOrder();
1402 
1403   // Currently we consider only dbg.value intrinsics -- we tell the salvager
1404   // that DW_OP_stack_value is desired.
1405   bool StackValue = true;
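       // For example (illustrative), if V is "%add = add i32 %x, 5" and %add
       // cannot be encoded, salvaging retries with %x and the expression
       // !DIExpression(DW_OP_plus_uconst, 5, DW_OP_stack_value).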
1406 
1407   // Can this Value be encoded without any further work?
1408   if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
1409     return;
1410 
1411   // Attempt to salvage back through as many instructions as possible. Bail if
1412   // a non-instruction is seen, such as a constant expression or global
1413   // variable. FIXME: Further work could recover those too.
1414   while (isa<Instruction>(V)) {
1415     const Instruction &VAsInst = *cast<const Instruction>(V);
1416     // Collect the salvaged expression ops and any additional values required.
1417     SmallVector<uint64_t, 16> Ops;
1418     SmallVector<Value *, 4> AdditionalValues;
1419     V = salvageDebugInfoImpl(const_cast<Instruction &>(VAsInst),
1420                              Expr->getNumLocationOperands(), Ops,
1421                              AdditionalValues);
1422     // If we cannot salvage any further, and haven't yet found a suitable debug
1423     // expression, bail out.
1424     if (!V)
1425       break;
1426 
1427     // TODO: If AdditionalValues isn't empty, then the salvage can only be
1428     // represented with a DBG_VALUE_LIST, so we give up. When we have support
1429     // here for variadic dbg_values, remove that condition.
1430     if (!AdditionalValues.empty())
1431       break;
1432 
1433     // New value and expr now represent this debuginfo.
1434     Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1435 
1436     // Some kind of simplification occurred: check whether the operand of the
1437     // salvaged debug expression can be encoded in this DAG.
1438     if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
1439       LLVM_DEBUG(
1440           dbgs() << "Salvaged debug location info for:\n  " << *Var << "\n"
1441                  << *OrigV << "\nBy stripping back to:\n  " << *V << "\n");
1442       return;
1443     }
1444   }
1445 
1446   // This was the final opportunity to salvage this debug information, and it
1447   // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1448   // any earlier variable location.
1449   assert(OrigV && "V shouldn't be null");
1450   auto *Undef = UndefValue::get(OrigV->getType());
1451   auto *SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1452   DAG.AddDbgValue(SDV, false);
1453   LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  "
1454                     << printDDI(OrigV, DDI) << "\n");
1455 }
1456 
1457 void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var,
1458                                                DIExpression *Expr,
1459                                                DebugLoc DbgLoc,
1460                                                unsigned Order) {
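       // Lower the kill as a DBG_VALUE with a poison location and an undef
       // expression, terminating any earlier location range for this variable.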
1461   Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context));
1462   DIExpression *NewExpr =
1463       const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr));
1464   handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
1465                    /*IsVariadic*/ false);
1466 }
1467 
1468 bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
1469                                            DILocalVariable *Var,
1470                                            DIExpression *Expr, DebugLoc DbgLoc,
1471                                            unsigned Order, bool IsVariadic) {
1472   if (Values.empty())
1473     return true;
1474   SmallVector<SDDbgOperand> LocationOps;
1475   SmallVector<SDNode *> Dependencies;
1476   for (const Value *V : Values) {
1477     // Constant value.
1478     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1479         isa<ConstantPointerNull>(V)) {
1480       LocationOps.emplace_back(SDDbgOperand::fromConst(V));
1481       continue;
1482     }
1483 
1484     // Look through IntToPtr constants.
1485     if (auto *CE = dyn_cast<ConstantExpr>(V))
1486       if (CE->getOpcode() == Instruction::IntToPtr) {
1487         LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
1488         continue;
1489       }
1490 
1491     // If the Value is a frame index, we can create a FrameIndex debug value
1492     // without relying on the DAG at all.
1493     if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1494       auto SI = FuncInfo.StaticAllocaMap.find(AI);
1495       if (SI != FuncInfo.StaticAllocaMap.end()) {
1496         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1497         continue;
1498       }
1499     }
1500 
1501     // Do not use getValue() in here; we don't want to generate code at
1502     // this point if it hasn't been done yet.
1503     SDValue N = NodeMap[V];
1504     if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1505       N = UnusedArgNodeMap[V];
1506     if (N.getNode()) {
1507       // Only emit func arg dbg value for non-variadic dbg.values for now.
1508       if (!IsVariadic &&
1509           EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1510                                    FuncArgumentDbgValueKind::Value, N))
1511         return true;
1512       if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
1513         // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
1514         // describe stack slot locations.
1515         //
1516         // Consider "int x = 0; int *px = &x;". There are two kinds of
1517         // interesting debug values here after optimization:
1518         //
1519         //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
1520         //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1521         //
1522         // Both describe the direct values of their associated variables.
1523         Dependencies.push_back(N.getNode());
1524         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1525         continue;
1526       }
1527       LocationOps.emplace_back(
1528           SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1529       continue;
1530     }
1531 
1532     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1533     // Special rules apply for the first dbg.values of parameter variables in a
1534     // function. Identify them by the fact that they reference Argument values,
1535     // that the variable is a parameter, and that the location is not inlined. We
1536     // need to let them dangle until they get an SDNode.
1537     bool IsParamOfFunc =
1538         isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1539     if (IsParamOfFunc)
1540       return false;
1541 
1542     // The value is not used in this block yet (or it would have an SDNode).
1543     // We still want the value to appear for the user if possible -- if it has
1544     // an associated VReg, we can refer to that instead.
1545     auto VMI = FuncInfo.ValueMap.find(V);
1546     if (VMI != FuncInfo.ValueMap.end()) {
1547       unsigned Reg = VMI->second;
1548       // If this is a PHI node, it may be split up into several MI PHI nodes
1549       // (in FunctionLoweringInfo::set).
1550       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1551                        V->getType(), std::nullopt);
1552       if (RFV.occupiesMultipleRegs()) {
1553         // FIXME: We could potentially support variadic dbg_values here.
1554         if (IsVariadic)
1555           return false;
1556         unsigned Offset = 0;
1557         unsigned BitsToDescribe = 0;
1558         if (auto VarSize = Var->getSizeInBits())
1559           BitsToDescribe = *VarSize;
1560         if (auto Fragment = Expr->getFragmentInfo())
1561           BitsToDescribe = Fragment->SizeInBits;
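             // For example (illustrative), a 64-bit variable lowered into two
             // 32-bit registers is emitted as two DBG_VALUEs, one per register,
             // with fragments DW_OP_LLVM_fragment(0, 32) and
             // DW_OP_LLVM_fragment(32, 32).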
1562         for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
1563           // Bail out if all bits are described already.
1564           if (Offset >= BitsToDescribe)
1565             break;
1566           // TODO: handle scalable vectors.
1567           unsigned RegisterSize = RegAndSize.second;
1568           unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1569                                       ? BitsToDescribe - Offset
1570                                       : RegisterSize;
1571           auto FragmentExpr = DIExpression::createFragmentExpression(
1572               Expr, Offset, FragmentSize);
1573           if (!FragmentExpr)
1574             continue;
1575           SDDbgValue *SDV = DAG.getVRegDbgValue(
1576               Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, SDNodeOrder);
1577           DAG.AddDbgValue(SDV, false);
1578           Offset += RegisterSize;
1579         }
1580         return true;
1581       }
1582       // We can use simple vreg locations for variadic dbg_values as well.
1583       LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1584       continue;
1585     }
1586     // We failed to create a SDDbgOperand for V.
1587     return false;
1588   }
1589 
1590   // We have created a SDDbgOperand for each Value in Values.
1591   // TODO: Should this use Order instead of SDNodeOrder?
1592   assert(!LocationOps.empty());
1593   SDDbgValue *SDV = DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1594                                         /*IsIndirect=*/false, DbgLoc,
1595                                         SDNodeOrder, IsVariadic);
1596   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1597   return true;
1598 }
1599 
1600 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1601   // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1602   for (auto &Pair : DanglingDebugInfoMap)
1603     for (auto &DDI : Pair.second)
1604       salvageUnresolvedDbgValue(const_cast<Value *>(Pair.first), DDI);
1605   clearDanglingDebugInfo();
1606 }
1607 
1608 /// getCopyFromRegs - If there was virtual register allocated for the value V
1609 /// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
1610 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1611   DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1612   SDValue Result;
1613 
1614   if (It != FuncInfo.ValueMap.end()) {
1615     Register InReg = It->second;
1616 
1617     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1618                      DAG.getDataLayout(), InReg, Ty,
1619                      std::nullopt); // This is not an ABI copy.
1620     SDValue Chain = DAG.getEntryNode();
1621     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1622                                  V);
1623     resolveDanglingDebugInfo(V, Result);
1624   }
1625 
1626   return Result;
1627 }
1628 
1629 /// getValue - Return an SDValue for the given Value.
1630 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1631   // If we already have an SDValue for this value, use it. It's important
1632   // to do this first, so that we don't create a CopyFromReg if we already
1633   // have a regular SDValue.
1634   SDValue &N = NodeMap[V];
1635   if (N.getNode()) return N;
1636 
1637   // If there's a virtual register allocated and initialized for this
1638   // value, use it.
1639   if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1640     return copyFromReg;
1641 
1642   // Otherwise create a new SDValue and remember it.
1643   SDValue Val = getValueImpl(V);
1644   NodeMap[V] = Val;
1645   resolveDanglingDebugInfo(V, Val);
1646   return Val;
1647 }
1648 
1649 /// getNonRegisterValue - Return an SDValue for the given Value, but
1650 /// don't look in FuncInfo.ValueMap for a virtual register.
1651 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1652   // If we already have an SDValue for this value, use it.
1653   SDValue &N = NodeMap[V];
1654   if (N.getNode()) {
1655     if (isIntOrFPConstant(N)) {
1656       // Remove the debug location from the node as the node is about to be used
1657       // in a location which may differ from the original debug location.  This
1658       // is relevant to Constant and ConstantFP nodes because they can appear
1659       // as constant expressions inside PHI nodes.
1660       N->setDebugLoc(DebugLoc());
1661     }
1662     return N;
1663   }
1664 
1665   // Otherwise create a new SDValue and remember it.
1666   SDValue Val = getValueImpl(V);
1667   NodeMap[V] = Val;
1668   resolveDanglingDebugInfo(V, Val);
1669   return Val;
1670 }
1671 
1672 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1673 /// Create an SDValue for the given value.
1674 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1675   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1676 
1677   if (const Constant *C = dyn_cast<Constant>(V)) {
1678     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1679 
1680     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1681       return DAG.getConstant(*CI, getCurSDLoc(), VT);
1682 
1683     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1684       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1685 
1686     if (isa<ConstantPointerNull>(C)) {
1687       unsigned AS = V->getType()->getPointerAddressSpace();
1688       return DAG.getConstant(0, getCurSDLoc(),
1689                              TLI.getPointerTy(DAG.getDataLayout(), AS));
1690     }
1691 
1692     if (match(C, m_VScale()))
1693       return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1694 
1695     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1696       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1697 
1698     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1699       return DAG.getUNDEF(VT);
1700 
1701     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1702       visit(CE->getOpcode(), *CE);
1703       SDValue N1 = NodeMap[V];
1704       assert(N1.getNode() && "visit didn't populate the NodeMap!");
1705       return N1;
1706     }
1707 
1708     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1709       SmallVector<SDValue, 4> Constants;
1710       for (const Use &U : C->operands()) {
1711         SDNode *Val = getValue(U).getNode();
1712         // If the operand is an empty aggregate, there are no values.
1713         if (!Val) continue;
1714         // Add each leaf value from the operand to the Constants list
1715         // to form a flattened list of all the values.
1716         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1717           Constants.push_back(SDValue(Val, i));
1718       }
1719 
1720       return DAG.getMergeValues(Constants, getCurSDLoc());
1721     }
1722 
1723     if (const ConstantDataSequential *CDS =
1724           dyn_cast<ConstantDataSequential>(C)) {
1725       SmallVector<SDValue, 4> Ops;
1726       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1727         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1728         // Add each leaf value from the element to the Ops list
1729         // to form a flattened list of all the values.
1730         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1731           Ops.push_back(SDValue(Val, i));
1732       }
1733 
1734       if (isa<ArrayType>(CDS->getType()))
1735         return DAG.getMergeValues(Ops, getCurSDLoc());
1736       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1737     }
1738 
1739     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1740       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1741              "Unknown struct or array constant!");
1742 
1743       SmallVector<EVT, 4> ValueVTs;
1744       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1745       unsigned NumElts = ValueVTs.size();
1746       if (NumElts == 0)
1747         return SDValue(); // empty struct
1748       SmallVector<SDValue, 4> Constants(NumElts);
1749       for (unsigned i = 0; i != NumElts; ++i) {
1750         EVT EltVT = ValueVTs[i];
1751         if (isa<UndefValue>(C))
1752           Constants[i] = DAG.getUNDEF(EltVT);
1753         else if (EltVT.isFloatingPoint())
1754           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1755         else
1756           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1757       }
1758 
1759       return DAG.getMergeValues(Constants, getCurSDLoc());
1760     }
1761 
1762     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1763       return DAG.getBlockAddress(BA, VT);
1764 
1765     if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1766       return getValue(Equiv->getGlobalValue());
1767 
1768     if (const auto *NC = dyn_cast<NoCFIValue>(C))
1769       return getValue(NC->getGlobalValue());
1770 
1771     if (VT == MVT::aarch64svcount) {
1772       assert(C->isNullValue() && "Can only zero this target type!");
1773       return DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT,
1774                          DAG.getConstant(0, getCurSDLoc(), MVT::nxv16i1));
1775     }
1776 
1777     VectorType *VecTy = cast<VectorType>(V->getType());
1778 
1779     // Now that we know the number and type of the elements, get that number of
1780     // elements into the Ops array based on what kind of constant it is.
1781     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1782       SmallVector<SDValue, 16> Ops;
1783       unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1784       for (unsigned i = 0; i != NumElements; ++i)
1785         Ops.push_back(getValue(CV->getOperand(i)));
1786 
1787       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1788     }
1789 
1790     if (isa<ConstantAggregateZero>(C)) {
1791       EVT EltVT =
1792           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1793 
1794       SDValue Op;
1795       if (EltVT.isFloatingPoint())
1796         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1797       else
1798         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1799 
1800       return NodeMap[V] = DAG.getSplat(VT, getCurSDLoc(), Op);
1801     }
1802 
1803     llvm_unreachable("Unknown vector constant");
1804   }
1805 
1806   // If this is a static alloca, generate it as the frame index instead of
1807   // emitting the address computation.
1808   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1809     DenseMap<const AllocaInst*, int>::iterator SI =
1810       FuncInfo.StaticAllocaMap.find(AI);
1811     if (SI != FuncInfo.StaticAllocaMap.end())
1812       return DAG.getFrameIndex(
1813           SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
1814   }
1815 
1816   // If this is an instruction which fast-isel has deferred, select it now.
1817   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1818     Register InReg = FuncInfo.InitializeRegForValue(Inst);
1819 
1820     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1821                      Inst->getType(), std::nullopt);
1822     SDValue Chain = DAG.getEntryNode();
1823     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1824   }
1825 
1826   if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
1827     return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1828 
1829   if (const auto *BB = dyn_cast<BasicBlock>(V))
1830     return DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
1831 
1832   llvm_unreachable("Can't get register for value!");
1833 }
1834 
1835 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1836   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1837   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1838   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1839   bool IsSEH = isAsynchronousEHPersonality(Pers);
1840   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1841   if (!IsSEH)
1842     CatchPadMBB->setIsEHScopeEntry();
1843   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1844   if (IsMSVCCXX || IsCoreCLR)
1845     CatchPadMBB->setIsEHFuncletEntry();
1846 }
1847 
1848 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1849   // Update machine-CFG edge.
1850   MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1851   FuncInfo.MBB->addSuccessor(TargetMBB);
1852   TargetMBB->setIsEHCatchretTarget(true);
1853   DAG.getMachineFunction().setHasEHCatchret(true);
1854 
1855   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1856   bool IsSEH = isAsynchronousEHPersonality(Pers);
1857   if (IsSEH) {
1858     // If this is not a fall-through branch or optimizations are switched off,
1859     // emit the branch.
1860     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1861         TM.getOptLevel() == CodeGenOptLevel::None)
1862       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1863                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1864     return;
1865   }
1866 
1867   // Figure out the funclet membership for the catchret's successor.
1868   // This will be used by the FuncletLayout pass to determine how to order the
1869   // BB's.
1870   // A 'catchret' returns to the outer scope's color.
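       // For example (illustrative), for "catchret from %catch to label %after"
       // where the enclosing catchswitch is "within none", the successor's color
       // is the function's entry block.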
1871   Value *ParentPad = I.getCatchSwitchParentPad();
1872   const BasicBlock *SuccessorColor;
1873   if (isa<ConstantTokenNone>(ParentPad))
1874     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1875   else
1876     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1877   assert(SuccessorColor && "No parent funclet for catchret!");
1878   MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1879   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1880 
1881   // Create the terminator node.
1882   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1883                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
1884                             DAG.getBasicBlock(SuccessorColorMBB));
1885   DAG.setRoot(Ret);
1886 }
1887 
1888 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1889   // Don't emit any special code for the cleanuppad instruction. It just marks
1890   // the start of an EH scope/funclet.
1891   FuncInfo.MBB->setIsEHScopeEntry();
1892   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1893   if (Pers != EHPersonality::Wasm_CXX) {
1894     FuncInfo.MBB->setIsEHFuncletEntry();
1895     FuncInfo.MBB->setIsCleanupFuncletEntry();
1896   }
1897 }
1898 
1899 // In wasm EH, even though a catchpad may not catch an exception if a tag does
1900 // not match, it is OK to add only the first unwind destination catchpad to the
1901 // successors, because there will be at least one invoke instruction within the
1902 // catch scope that points to the next unwind destination, if one exists, so
1903 // CFGSort cannot mess up with BB sorting order.
1904 // CFGSort cannot mess up the BB sorting order.
1905 // call within them, and catchpads only consisting of 'catch (...)' have a
1906 // '__cxa_end_catch' call within them, both of which generate invokes in case
1907 // the next unwind destination exists, i.e., the next unwind destination is not
1908 // the caller.)
1909 //
1910 // Having at most one EH pad successor is also simpler and helps later
1911 // transformations.
1912 //
1913 // For example,
1914 // current:
1915 //   invoke void @foo to ... unwind label %catch.dispatch
1916 // catch.dispatch:
1917 //   %0 = catchswitch within ... [label %catch.start] unwind label %next
1918 // catch.start:
1919 //   ...
1920 //   ... in this BB or some other child BB dominated by this BB there will be an
1921 //   invoke that points to 'next' BB as an unwind destination
1922 //
1923 // next: ; We don't need to add this to 'current' BB's successor
1924 //   ...
1925 static void findWasmUnwindDestinations(
1926     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1927     BranchProbability Prob,
1928     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1929         &UnwindDests) {
1930   while (EHPadBB) {
1931     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1932     if (isa<CleanupPadInst>(Pad)) {
1933       // Stop on cleanup pads.
1934       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1935       UnwindDests.back().first->setIsEHScopeEntry();
1936       break;
1937     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1938       // Add the catchpad handlers to the possible destinations. We don't
1939       // continue to the unwind destination of the catchswitch for wasm.
1940       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1941         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1942         UnwindDests.back().first->setIsEHScopeEntry();
1943       }
1944       break;
1945     } else {
1946       continue;
1947     }
1948   }
1949 }
1950 
1951 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
1952 /// many places it could ultimately go. In the IR, we have a single unwind
1953 /// destination, but in the machine CFG, we enumerate all the possible blocks.
1954 /// This function skips over imaginary basic blocks that hold catchswitch
1955 /// instructions, and finds all the "real" machine
1956 /// basic block destinations. As those destinations may not be successors of
1957 /// EHPadBB, here we also calculate the edge probability to those destinations.
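     /// For example (illustrative), if EHPadBB holds
     ///   %cs = catchswitch within none [label %h1, label %h2] unwind label %next
     /// then both %h1 and %h2 become machine-CFG destinations, and the walk
     /// continues into %next with Prob scaled by the catchswitch's unwind edge
     /// probability.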
1958 /// The passed-in Prob is the edge probability to EHPadBB.
1959 static void findUnwindDestinations(
1960     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1961     BranchProbability Prob,
1962     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1963         &UnwindDests) {
1964   EHPersonality Personality =
1965     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1966   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1967   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1968   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1969   bool IsSEH = isAsynchronousEHPersonality(Personality);
1970 
1971   if (IsWasmCXX) {
1972     findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
1973     assert(UnwindDests.size() <= 1 &&
1974            "There should be at most one unwind destination for wasm");
1975     return;
1976   }
1977 
1978   while (EHPadBB) {
1979     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1980     BasicBlock *NewEHPadBB = nullptr;
1981     if (isa<LandingPadInst>(Pad)) {
1982       // Stop on landingpads. They are not funclets.
1983       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1984       break;
1985     } else if (isa<CleanupPadInst>(Pad)) {
1986       // Stop on cleanup pads. Cleanups are always funclet entries for all known
1987       // personalities.
1988       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1989       UnwindDests.back().first->setIsEHScopeEntry();
1990       UnwindDests.back().first->setIsEHFuncletEntry();
1991       break;
1992     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1993       // Add the catchpad handlers to the possible destinations.
1994       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1995         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1996         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1997         if (IsMSVCCXX || IsCoreCLR)
1998           UnwindDests.back().first->setIsEHFuncletEntry();
1999         if (!IsSEH)
2000           UnwindDests.back().first->setIsEHScopeEntry();
2001       }
2002       NewEHPadBB = CatchSwitch->getUnwindDest();
2003     } else {
2004       continue;
2005     }
2006 
2007     BranchProbabilityInfo *BPI = FuncInfo.BPI;
2008     if (BPI && NewEHPadBB)
2009       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2010     EHPadBB = NewEHPadBB;
2011   }
2012 }
2013 
2014 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
2015   // Update successor info.
2016   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2017   auto UnwindDest = I.getUnwindDest();
2018   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2019   BranchProbability UnwindDestProb =
2020       (BPI && UnwindDest)
2021           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
2022           : BranchProbability::getZero();
2023   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
2024   for (auto &UnwindDest : UnwindDests) {
2025     UnwindDest.first->setIsEHPad();
2026     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2027   }
2028   FuncInfo.MBB->normalizeSuccProbs();
2029 
2030   // Create the terminator node.
2031   SDValue Ret =
2032       DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
2033   DAG.setRoot(Ret);
2034 }
2035 
2036 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
2037   report_fatal_error("visitCatchSwitch not yet implemented!");
2038 }
2039 
2040 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
2041   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2042   auto &DL = DAG.getDataLayout();
2043   SDValue Chain = getControlRoot();
2044   SmallVector<ISD::OutputArg, 8> Outs;
2045   SmallVector<SDValue, 8> OutVals;
2046 
2047   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
2048   // lower
2049   //
2050   //   %val = call <ty> @llvm.experimental.deoptimize()
2051   //   ret <ty> %val
2052   //
2053   // differently.
2054   if (I.getParent()->getTerminatingDeoptimizeCall()) {
2055     LowerDeoptimizingReturn();
2056     return;
2057   }
2058 
2059   if (!FuncInfo.CanLowerReturn) {
2060     unsigned DemoteReg = FuncInfo.DemoteRegister;
2061     const Function *F = I.getParent()->getParent();
2062 
2063     // Emit a store of the return value through the virtual register.
2064     // Leave Outs empty so that LowerReturn won't try to load return
2065     // registers the usual way.
2066     SmallVector<EVT, 1> PtrValueVTs;
2067     ComputeValueVTs(TLI, DL,
2068                     PointerType::get(F->getContext(),
2069                                      DAG.getDataLayout().getAllocaAddrSpace()),
2070                     PtrValueVTs);
2071 
2072     SDValue RetPtr =
2073         DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVTs[0]);
2074     SDValue RetOp = getValue(I.getOperand(0));
2075 
2076     SmallVector<EVT, 4> ValueVTs, MemVTs;
2077     SmallVector<uint64_t, 4> Offsets;
2078     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2079                     &Offsets, 0);
2080     unsigned NumValues = ValueVTs.size();
2081 
2082     SmallVector<SDValue, 4> Chains(NumValues);
2083     Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2084     for (unsigned i = 0; i != NumValues; ++i) {
2085       // An aggregate return value cannot wrap around the address space, so
2086       // offsets to its parts don't wrap either.
2087       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
2088                                            TypeSize::getFixed(Offsets[i]));
2089 
2090       SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
2091       if (MemVTs[i] != ValueVTs[i])
2092         Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
2093       Chains[i] = DAG.getStore(
2094           Chain, getCurSDLoc(), Val,
2095           // FIXME: better loc info would be nice.
2096           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
2097           commonAlignment(BaseAlign, Offsets[i]));
2098     }
2099 
2100     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
2101                         MVT::Other, Chains);
2102   } else if (I.getNumOperands() != 0) {
2103     SmallVector<EVT, 4> ValueVTs;
2104     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
2105     unsigned NumValues = ValueVTs.size();
2106     if (NumValues) {
2107       SDValue RetOp = getValue(I.getOperand(0));
2108 
2109       const Function *F = I.getParent()->getParent();
2110 
2111       bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
2112           I.getOperand(0)->getType(), F->getCallingConv(),
2113           /*IsVarArg*/ false, DL);
2114 
2115       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2116       if (F->getAttributes().hasRetAttr(Attribute::SExt))
2117         ExtendKind = ISD::SIGN_EXTEND;
2118       else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2119         ExtendKind = ISD::ZERO_EXTEND;
2120 
2121       LLVMContext &Context = F->getContext();
2122       bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2123 
2124       for (unsigned j = 0; j != NumValues; ++j) {
2125         EVT VT = ValueVTs[j];
2126 
2127         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2128           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
2129 
2130         CallingConv::ID CC = F->getCallingConv();
2131 
2132         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
2133         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
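             // For example (illustrative), returning an i64 on a typical 32-bit
             // target yields NumParts == 2 and PartVT == i32, so the value is
             // split across two registers below.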
2134         SmallVector<SDValue, 4> Parts(NumParts);
2135         getCopyToParts(DAG, getCurSDLoc(),
2136                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
2137                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
2138 
2139         // 'inreg' on function refers to return value
2140         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2141         if (RetInReg)
2142           Flags.setInReg();
2143 
2144         if (I.getOperand(0)->getType()->isPointerTy()) {
2145           Flags.setPointer();
2146           Flags.setPointerAddrSpace(
2147               cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2148         }
2149 
2150         if (NeedsRegBlock) {
2151           Flags.setInConsecutiveRegs();
2152           if (j == NumValues - 1)
2153             Flags.setInConsecutiveRegsLast();
2154         }
2155 
2156         // Propagate extension type if any
2157         if (ExtendKind == ISD::SIGN_EXTEND)
2158           Flags.setSExt();
2159         else if (ExtendKind == ISD::ZERO_EXTEND)
2160           Flags.setZExt();
2161 
2162         for (unsigned i = 0; i < NumParts; ++i) {
2163           Outs.push_back(ISD::OutputArg(Flags,
2164                                         Parts[i].getValueType().getSimpleVT(),
2165                                         VT, /*isfixed=*/true, 0, 0));
2166           OutVals.push_back(Parts[i]);
2167         }
2168       }
2169     }
2170   }
2171 
2172   // Push the swifterror virtual register in as the last element of Outs. This
2173   // ensures the swifterror virtual register will be returned in the swifterror
2174   // physical register.
2175   const Function *F = I.getParent()->getParent();
2176   if (TLI.supportSwiftError() &&
2177       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2178     assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2179     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2180     Flags.setSwiftError();
2181     Outs.push_back(ISD::OutputArg(
2182         Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
2183         /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
2184     // Create SDNode for the swifterror virtual register.
2185     OutVals.push_back(
2186         DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2187                             &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2188                         EVT(TLI.getPointerTy(DL))));
2189   }
2190 
2191   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2192   CallingConv::ID CallConv =
2193     DAG.getMachineFunction().getFunction().getCallingConv();
2194   Chain = DAG.getTargetLoweringInfo().LowerReturn(
2195       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2196 
2197   // Verify that the target's LowerReturn behaved as expected.
2198   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2199          "LowerReturn didn't return a valid chain!");
2200 
2201   // Update the DAG with the new chain value resulting from return lowering.
2202   DAG.setRoot(Chain);
2203 }
2204 
2205 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
2206 /// created for it, emit nodes to copy the value into the virtual
2207 /// registers.
2208 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
2209   // Skip empty types
2210   if (V->getType()->isEmptyTy())
2211     return;
2212 
2213   DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
2214   if (VMI != FuncInfo.ValueMap.end()) {
2215     assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2216            "Unused value assigned virtual registers!");
2217     CopyValueToVirtualRegister(V, VMI->second);
2218   }
2219 }
2220 
2221 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
2222 /// the current basic block, add it to ValueMap now so that we'll get a
2223 /// CopyTo/FromReg.
2224 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
2225   // No need to export constants.
2226   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2227 
2228   // Already exported?
2229   if (FuncInfo.isExportedInst(V)) return;
2230 
2231   Register Reg = FuncInfo.InitializeRegForValue(V);
2232   CopyValueToVirtualRegister(V, Reg);
2233 }
2234 
2235 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
2236                                                      const BasicBlock *FromBB) {
2237   // The operands of the setcc have to be in this block.  We don't know
2238   // how to export them from some other block.
2239   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2240     // Can export from current BB.
2241     if (VI->getParent() == FromBB)
2242       return true;
2243 
2244     // Is already exported, noop.
2245     return FuncInfo.isExportedInst(V);
2246   }
2247 
2248   // If this is an argument, we can export it if the BB is the entry block or
2249   // if it is already exported.
2250   if (isa<Argument>(V)) {
2251     if (FromBB->isEntryBlock())
2252       return true;
2253 
2254     // Otherwise, can only export this if it is already exported.
2255     return FuncInfo.isExportedInst(V);
2256   }
2257 
2258   // Otherwise, constants can always be exported.
2259   return true;
2260 }
2261 
2262 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2263 BranchProbability
2264 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2265                                         const MachineBasicBlock *Dst) const {
2266   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2267   const BasicBlock *SrcBB = Src->getBasicBlock();
2268   const BasicBlock *DstBB = Dst->getBasicBlock();
2269   if (!BPI) {
2270     // If BPI is not available, set the default probability as 1 / N, where N is
2271     // the number of successors.
2272     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2273     return BranchProbability(1, SuccSize);
2274   }
2275   return BPI->getEdgeProbability(SrcBB, DstBB);
2276 }
2277 
2278 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2279                                                MachineBasicBlock *Dst,
2280                                                BranchProbability Prob) {
2281   if (!FuncInfo.BPI)
2282     Src->addSuccessorWithoutProb(Dst);
2283   else {
2284     if (Prob.isUnknown())
2285       Prob = getEdgeProbability(Src, Dst);
2286     Src->addSuccessor(Dst, Prob);
2287   }
2288 }
2289 
2290 static bool InBlock(const Value *V, const BasicBlock *BB) {
2291   if (const Instruction *I = dyn_cast<Instruction>(V))
2292     return I->getParent() == BB;
2293   return true;
2294 }
2295 
2296 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2297 /// This function emits a branch and is used at the leaves of an OR or an
2298 /// AND operator tree.
2299 void
2300 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2301                                                   MachineBasicBlock *TBB,
2302                                                   MachineBasicBlock *FBB,
2303                                                   MachineBasicBlock *CurBB,
2304                                                   MachineBasicBlock *SwitchBB,
2305                                                   BranchProbability TProb,
2306                                                   BranchProbability FProb,
2307                                                   bool InvertCond) {
2308   const BasicBlock *BB = CurBB->getBasicBlock();
2309 
2310   // If the leaf of the tree is a comparison, merge the condition into
2311   // the caseblock.
2312   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2313     // The operands of the cmp have to be in this block.  We don't know
2314     // how to export them from some other block.  If this is the first block
2315     // of the sequence, no exporting is needed.
2316     if (CurBB == SwitchBB ||
2317         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2318          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2319       ISD::CondCode Condition;
2320       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2321         ICmpInst::Predicate Pred =
2322             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2323         Condition = getICmpCondCode(Pred);
2324       } else {
2325         const FCmpInst *FC = cast<FCmpInst>(Cond);
2326         FCmpInst::Predicate Pred =
2327             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2328         Condition = getFCmpCondCode(Pred);
2329         if (TM.Options.NoNaNsFPMath)
2330           Condition = getFCmpCodeWithoutNaN(Condition);
2331       }
2332 
2333       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2334                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2335       SL->SwitchCases.push_back(CB);
2336       return;
2337     }
2338   }
2339 
2340   // Create a CaseBlock record representing this branch.
2341   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2342   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2343                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2344   SL->SwitchCases.push_back(CB);
2345 }
2346 
2347 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2348                                                MachineBasicBlock *TBB,
2349                                                MachineBasicBlock *FBB,
2350                                                MachineBasicBlock *CurBB,
2351                                                MachineBasicBlock *SwitchBB,
2352                                                Instruction::BinaryOps Opc,
2353                                                BranchProbability TProb,
2354                                                BranchProbability FProb,
2355                                                bool InvertCond) {
2356   // Skip over a 'not' that is not itself part of the tree, and remember to
2357   // invert the op and operands at the next level.
2358   Value *NotCond;
2359   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2360       InBlock(NotCond, CurBB->getBasicBlock())) {
2361     FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2362                          !InvertCond);
2363     return;
2364   }
2365 
2366   const Instruction *BOp = dyn_cast<Instruction>(Cond);
2367   const Value *BOpOp0, *BOpOp1;
2368   // Compute the effective opcode for Cond, taking into account whether it needs
2369   // to be inverted, e.g.
2370   //   and (not (or A, B)), C
2371   // gets lowered as
2372   //   and (and (not A, not B), C)
2373   Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2374   if (BOp) {
2375     BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2376                ? Instruction::And
2377                : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2378                       ? Instruction::Or
2379                       : (Instruction::BinaryOps)0);
2380     if (InvertCond) {
2381       if (BOpc == Instruction::And)
2382         BOpc = Instruction::Or;
2383       else if (BOpc == Instruction::Or)
2384         BOpc = Instruction::And;
2385     }
2386   }
2387 
2388   // If this node is not part of the or/and tree, emit it as a branch.
2389   // Note that all nodes in the tree should have the same opcode.
2390   bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2391   if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2392       !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2393       !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2394     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2395                                  TProb, FProb, InvertCond);
2396     return;
2397   }
2398 
2399   //  Create TmpBB after CurBB.
2400   MachineFunction::iterator BBI(CurBB);
2401   MachineFunction &MF = DAG.getMachineFunction();
2402   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2403   CurBB->getParent()->insert(++BBI, TmpBB);
2404 
2405   if (Opc == Instruction::Or) {
2406     // Codegen X | Y as:
2407     // BB1:
2408     //   jmp_if_X TBB
2409     //   jmp TmpBB
2410     // TmpBB:
2411     //   jmp_if_Y TBB
2412     //   jmp FBB
2413     //
2414 
2415     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2416     // The requirement is that
2417     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2418     //     = TrueProb for original BB.
2419     // Assuming the original probabilities are A and B, one choice is to set
2420     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2421     // A/(1+B) and 2B/(1+B). This choice assumes that
2422     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2423     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2424     // TmpBB, but the math is more complicated.
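         // Worked example (illustrative): with A = B = 1/2, BB1 gets {1/4, 3/4}
         // and TmpBB gets {1/3, 2/3}; then 1/4 + 3/4 * 1/3 = 1/2 = A, as required.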
2425 
2426     auto NewTrueProb = TProb / 2;
2427     auto NewFalseProb = TProb / 2 + FProb;
2428     // Emit the LHS condition.
2429     FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2430                          NewFalseProb, InvertCond);
2431 
2432     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2433     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2434     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2435     // Emit the RHS condition into TmpBB.
2436     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2437                          Probs[1], InvertCond);
2438   } else {
2439     assert(Opc == Instruction::And && "Unknown merge op!");
2440     // Codegen X & Y as:
2441     // BB1:
2442     //   jmp_if_X TmpBB
2443     //   jmp FBB
2444     // TmpBB:
2445     //   jmp_if_Y TBB
2446     //   jmp FBB
2447     //
2448     //  This requires creation of TmpBB after CurBB.
2449 
2450     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2451     // The requirement is that
2452     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2453     //     = FalseProb for original BB.
2454     // Assuming the original probabilities are A and B, one choice is to set
2455     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2456     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2457     // TrueProb for BB1 * FalseProb for TmpBB.
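         // Worked example (illustrative): with A = B = 1/2, BB1 gets {3/4, 1/4}
         // and TmpBB gets {2/3, 1/3}; then 1/4 + 3/4 * 1/3 = 1/2 = B, as required.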
2458 
2459     auto NewTrueProb = TProb + FProb / 2;
2460     auto NewFalseProb = FProb / 2;
2461     // Emit the LHS condition.
2462     FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2463                          NewFalseProb, InvertCond);
2464 
2465     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2466     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2467     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2468     // Emit the RHS condition into TmpBB.
2469     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2470                          Probs[1], InvertCond);
2471   }
2472 }
2473 
2474 /// If the set of cases should be emitted as a series of branches, return true.
2475 /// If we should emit this as a bunch of and/or'd together conditions, return
2476 /// false.
2477 bool
2478 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2479   if (Cases.size() != 2) return true;
2480 
2481   // If this is two comparisons of the same values or'd or and'd together, they
2482   // will get folded into a single comparison, so don't emit two blocks.
2483   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2484        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2485       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2486        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2487     return false;
2488   }
2489 
2490   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2491   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2492   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2493       Cases[0].CC == Cases[1].CC &&
2494       isa<Constant>(Cases[0].CmpRHS) &&
2495       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2496     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2497       return false;
2498     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2499       return false;
2500   }
2501 
2502   return true;
2503 }
2504 
2505 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2506   MachineBasicBlock *BrMBB = FuncInfo.MBB;
2507 
2508   // Update machine-CFG edges.
2509   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2510 
2511   if (I.isUnconditional()) {
2512     // Update machine-CFG edges.
2513     BrMBB->addSuccessor(Succ0MBB);
2514 
2515     // If this is not a fall-through branch or optimizations are switched off,
2516     // emit the branch.
2517     if (Succ0MBB != NextBlock(BrMBB) ||
2518         TM.getOptLevel() == CodeGenOptLevel::None) {
2519       auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2520                             getControlRoot(), DAG.getBasicBlock(Succ0MBB));
2521       setValue(&I, Br);
2522       DAG.setRoot(Br);
2523     }
2524 
2525     return;
2526   }
2527 
2528   // If this condition is one of the special cases we handle, do special stuff
2529   // now.
2530   const Value *CondVal = I.getCondition();
2531   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2532 
2533   // If this is a series of conditions that are or'd or and'd together, emit
2534   // this as a sequence of branches instead of setcc's with and/or operations.
2535   // As long as jumps are not expensive (exceptions for multi-use logic ops,
2536   // unpredictable branches, and vector extracts because those jumps are likely
2537   // expensive for any target), this should improve performance.
2538   // For example, instead of something like:
2539   //     cmp A, B
2540   //     C = seteq
2541   //     cmp D, E
2542   //     F = setle
2543   //     or C, F
2544   //     jnz foo
2545   // Emit:
2546   //     cmp A, B
2547   //     je foo
2548   //     cmp D, E
2549   //     jle foo
2550   const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2551   if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2552       BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
2553     Value *Vec;
2554     const Value *BOp0, *BOp1;
2555     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2556     if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2557       Opcode = Instruction::And;
2558     else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2559       Opcode = Instruction::Or;
2560 
2561     if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2562                     match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
2563       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2564                            getEdgeProbability(BrMBB, Succ0MBB),
2565                            getEdgeProbability(BrMBB, Succ1MBB),
2566                            /*InvertCond=*/false);
2567       // If the compares in later blocks need to use values not currently
2568       // exported from this block, export them now.  This block should always
2569       // be the first entry.
2570       assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2571 
2572       // Allow some cases to be rejected.
2573       if (ShouldEmitAsBranches(SL->SwitchCases)) {
2574         for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2575           ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2576           ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2577         }
2578 
2579         // Emit the branch for this block.
2580         visitSwitchCase(SL->SwitchCases[0], BrMBB);
2581         SL->SwitchCases.erase(SL->SwitchCases.begin());
2582         return;
2583       }
2584 
2585       // Okay, we decided not to do this, remove any inserted MBB's and clear
2586       // SwitchCases.
2587       for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2588         FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2589 
2590       SL->SwitchCases.clear();
2591     }
2592   }
2593 
2594   // Create a CaseBlock record representing this branch.
2595   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2596                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2597 
2598   // Use visitSwitchCase to actually insert the fast branch sequence for this
2599   // cond branch.
2600   visitSwitchCase(CB, BrMBB);
2601 }
2602 
2603 /// visitSwitchCase - Emits the necessary code to represent a single node in
2604 /// the binary search tree resulting from lowering a switch instruction.
2605 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2606                                           MachineBasicBlock *SwitchBB) {
2607   SDValue Cond;
2608   SDValue CondLHS = getValue(CB.CmpLHS);
2609   SDLoc dl = CB.DL;
2610 
2611   if (CB.CC == ISD::SETTRUE) {
2612     // Branch or fall through to TrueBB.
2613     addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2614     SwitchBB->normalizeSuccProbs();
2615     if (CB.TrueBB != NextBlock(SwitchBB)) {
2616       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2617                               DAG.getBasicBlock(CB.TrueBB)));
2618     }
2619     return;
2620   }
2621 
2622   auto &TLI = DAG.getTargetLoweringInfo();
2623   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2624 
2625   // Build the setcc now.
2626   if (!CB.CmpMHS) {
2627     // Fold "(X == true)" to X and "(X == false)" to !X to
2628     // handle common cases produced by branch lowering.
2629     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2630         CB.CC == ISD::SETEQ)
2631       Cond = CondLHS;
2632     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2633              CB.CC == ISD::SETEQ) {
2634       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2635       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2636     } else {
2637       SDValue CondRHS = getValue(CB.CmpRHS);
2638 
2639       // If a pointer's DAG type is larger than its memory type then the DAG
2640       // values are zero-extended. This breaks signed comparisons so truncate
2641       // back to the underlying type before doing the compare.
2642       if (CondLHS.getValueType() != MemVT) {
2643         CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2644         CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2645       }
2646       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2647     }
2648   } else {
2649     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2650 
2651     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2652     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2653 
2654     SDValue CmpOp = getValue(CB.CmpMHS);
2655     EVT VT = CmpOp.getValueType();
2656 
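    // Lower the range [Low, High] as an unsigned check of CmpMHS - Low against
    // High - Low; when Low is the signed minimum value the subtraction is
    // unnecessary and a plain signed "CmpMHS <= High" suffices. For example, a
    // case range [10, 13] becomes setule(X - 10, 3).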
2657     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2658       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2659                           ISD::SETLE);
2660     } else {
2661       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2662                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2663       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2664                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2665     }
2666   }
2667 
2668   // Update successor info
2669   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2670   // TrueBB and FalseBB are always different unless the incoming IR is
2671   // degenerate. This only happens when running llc on weird IR.
2672   if (CB.TrueBB != CB.FalseBB)
2673     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2674   SwitchBB->normalizeSuccProbs();
2675 
2676   // If the lhs block is the next block, invert the condition so that we can
2677   // fall through to the lhs instead of the rhs block.
2678   if (CB.TrueBB == NextBlock(SwitchBB)) {
2679     std::swap(CB.TrueBB, CB.FalseBB);
2680     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2681     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2682   }
2683 
2684   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2685                                MVT::Other, getControlRoot(), Cond,
2686                                DAG.getBasicBlock(CB.TrueBB));
2687 
2688   setValue(CurInst, BrCond);
2689 
2690   // Insert the false branch. Do this even if it's a fall through branch,
2691   // this makes it easier to do DAG optimizations which require inverting
2692   // the branch condition.
2693   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2694                        DAG.getBasicBlock(CB.FalseBB));
2695 
2696   DAG.setRoot(BrCond);
2697 }
2698 
2699 /// visitJumpTable - Emit the JumpTable node in the current MBB.
2700 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2701   // Emit the code for the jump table
2702   assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2703   assert(JT.Reg != -1U && "Should lower JT Header first!");
2704   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2705   SDValue Index = DAG.getCopyFromReg(getControlRoot(), *JT.SL, JT.Reg, PTy);
2706   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2707   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
2708                                     Index.getValue(1), Table, Index);
2709   DAG.setRoot(BrJumpTable);
2710 }
2711 
2712 /// visitJumpTableHeader - This function emits the necessary code to produce an
2713 /// index into the JumpTable from the switch case value.
2714 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2715                                                JumpTableHeader &JTH,
2716                                                MachineBasicBlock *SwitchBB) {
2717   assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2718   const SDLoc &dl = *JT.SL;
2719 
2720   // Subtract the lowest switch case value from the value being switched on.
2721   SDValue SwitchOp = getValue(JTH.SValue);
2722   EVT VT = SwitchOp.getValueType();
2723   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2724                             DAG.getConstant(JTH.First, dl, VT));
2725 
2726   // The SDNode we just created, which holds the value being switched on minus
2727   // the smallest case value, needs to be copied to a virtual register so it
2728   // can be used as an index into the jump table in a subsequent basic block.
2729   // This value may be smaller or larger than the target's pointer type, and
2730   // therefore may require extension or truncation.
2731   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2732   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2733 
2734   unsigned JumpTableReg =
2735       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2736   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2737                                     JumpTableReg, SwitchOp);
2738   JT.Reg = JumpTableReg;
2739 
2740   if (!JTH.FallthroughUnreachable) {
2741     // Emit the range check for the jump table, and branch to the default block
2742     // for the switch statement if the value being switched on exceeds the
2743     // largest case in the switch.
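    // For example, for cases 10..13 the header computes Sub = SValue - 10 above
    // and branches to the default block when Sub >u 3; the unsigned compare
    // also catches values below the first case, since those wrap around.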
2744     SDValue CMP = DAG.getSetCC(
2745         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2746                                    Sub.getValueType()),
2747         Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2748 
2749     SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2750                                  MVT::Other, CopyTo, CMP,
2751                                  DAG.getBasicBlock(JT.Default));
2752 
2753     // Avoid emitting unnecessary branches to the next block.
2754     if (JT.MBB != NextBlock(SwitchBB))
2755       BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2756                            DAG.getBasicBlock(JT.MBB));
2757 
2758     DAG.setRoot(BrCond);
2759   } else {
2760     // Avoid emitting unnecessary branches to the next block.
2761     if (JT.MBB != NextBlock(SwitchBB))
2762       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2763                               DAG.getBasicBlock(JT.MBB)));
2764     else
2765       DAG.setRoot(CopyTo);
2766   }
2767 }
2768 
2769 /// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
2770 /// variable if one exists.
2771 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2772                                  SDValue &Chain) {
2773   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2774   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2775   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2776   MachineFunction &MF = DAG.getMachineFunction();
2777   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2778   MachineSDNode *Node =
2779       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2780   if (Global) {
2781     MachinePointerInfo MPInfo(Global);
2782     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2783                  MachineMemOperand::MODereferenceable;
2784     MachineMemOperand *MemRef = MF.getMachineMemOperand(
2785         MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
2786     DAG.setNodeMemRefs(Node, {MemRef});
2787   }
2788   if (PtrTy != PtrMemTy)
2789     return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2790   return SDValue(Node, 0);
2791 }
2792 
2793 /// Codegen a new tail for a stack protector check ParentMBB which has had its
2794 /// tail spliced into a stack protector check success bb.
2795 ///
2796 /// For a high level explanation of how this fits into the stack protector
2797 /// generation see the comment on the declaration of class
2798 /// StackProtectorDescriptor.
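///
/// A rough sketch of the emitted tail: load GuardVal from the stack protector
/// slot; if the target supplies a guard check function, simply call it with
/// GuardVal; otherwise load the reference guard value, compare the two with
/// SETNE, BRCOND to the failure MBB on mismatch, and BR to the success MBB.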
2799 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2800                                                   MachineBasicBlock *ParentBB) {
2801 
2802   // First create the loads to the guard/stack slot for the comparison.
2803   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2804   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2805   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2806 
2807   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2808   int FI = MFI.getStackProtectorIndex();
2809 
2810   SDValue Guard;
2811   SDLoc dl = getCurSDLoc();
2812   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2813   const Module &M = *ParentBB->getParent()->getFunction().getParent();
2814   Align Align =
2815       DAG.getDataLayout().getPrefTypeAlign(PointerType::get(M.getContext(), 0));
2816 
2817   // Generate code to load the content of the guard slot.
2818   SDValue GuardVal = DAG.getLoad(
2819       PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2820       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2821       MachineMemOperand::MOVolatile);
2822 
2823   if (TLI.useStackGuardXorFP())
2824     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2825 
2826   // Retrieve guard check function, nullptr if instrumentation is inlined.
2827   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2828     // The target provides a guard check function to validate the guard value.
2829     // Generate a call to that function with the content of the guard slot as
2830     // argument.
2831     FunctionType *FnTy = GuardCheckFn->getFunctionType();
2832     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2833 
2834     TargetLowering::ArgListTy Args;
2835     TargetLowering::ArgListEntry Entry;
2836     Entry.Node = GuardVal;
2837     Entry.Ty = FnTy->getParamType(0);
2838     if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
2839       Entry.IsInReg = true;
2840     Args.push_back(Entry);
2841 
2842     TargetLowering::CallLoweringInfo CLI(DAG);
2843     CLI.setDebugLoc(getCurSDLoc())
2844         .setChain(DAG.getEntryNode())
2845         .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2846                    getValue(GuardCheckFn), std::move(Args));
2847 
2848     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2849     DAG.setRoot(Result.second);
2850     return;
2851   }
2852 
2853   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2854   // Otherwise, emit a volatile load to retrieve the stack guard value.
2855   SDValue Chain = DAG.getEntryNode();
2856   if (TLI.useLoadStackGuardNode()) {
2857     Guard = getLoadStackGuard(DAG, dl, Chain);
2858   } else {
2859     const Value *IRGuard = TLI.getSDagStackGuard(M);
2860     SDValue GuardPtr = getValue(IRGuard);
2861 
2862     Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2863                         MachinePointerInfo(IRGuard, 0), Align,
2864                         MachineMemOperand::MOVolatile);
2865   }
2866 
2867   // Perform the comparison via a getsetcc.
2868   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2869                                                         *DAG.getContext(),
2870                                                         Guard.getValueType()),
2871                              Guard, GuardVal, ISD::SETNE);
2872 
2873   // If the guard and stack slot values differ, branch to the failure MBB.
2874   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2875                                MVT::Other, GuardVal.getOperand(0),
2876                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2877   // Otherwise branch to success MBB.
2878   SDValue Br = DAG.getNode(ISD::BR, dl,
2879                            MVT::Other, BrCond,
2880                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2881 
2882   DAG.setRoot(Br);
2883 }
2884 
2885 /// Codegen the failure basic block for a stack protector check.
2886 ///
2887 /// A failure stack protector machine basic block consists simply of a call to
2888 /// __stack_chk_fail().
2889 ///
2890 /// For a high level explanation of how this fits into the stack protector
2891 /// generation see the comment on the declaration of class
2892 /// StackProtectorDescriptor.
2893 void
2894 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2895   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2896   TargetLowering::MakeLibCallOptions CallOptions;
2897   CallOptions.setDiscardResult(true);
2898   SDValue Chain =
2899       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2900                       std::nullopt, CallOptions, getCurSDLoc())
2901           .second;
2902   // On PS4/PS5, the "return address" must still be within the calling
2903   // function, even if it's at the very end, so emit an explicit TRAP here.
2904   // Passing 'true' for doesNotReturn above won't generate the trap for us.
2905   if (TM.getTargetTriple().isPS())
2906     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2907   // WebAssembly needs an unreachable instruction after a non-returning call,
2908   // because the function return type can be different from __stack_chk_fail's
2909   // return type (void).
2910   if (TM.getTargetTriple().isWasm())
2911     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2912 
2913   DAG.setRoot(Chain);
2914 }
2915 
2916 /// visitBitTestHeader - This function emits the necessary code to produce a
2917 /// value suitable for "bit tests".
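///
/// For example, for switch cases {10, 12, 15} covered by one bit test, this
/// header computes Sub = SValue - 10, and the case block then checks
/// ((1 << Sub) & 0b100101) != 0 (see visitBitTestCase).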
2918 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2919                                              MachineBasicBlock *SwitchBB) {
2920   SDLoc dl = getCurSDLoc();
2921 
2922   // Subtract the minimum value.
2923   SDValue SwitchOp = getValue(B.SValue);
2924   EVT VT = SwitchOp.getValueType();
2925   SDValue RangeSub =
2926       DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
2927 
2928   // Determine the type of the test operands.
2929   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2930   bool UsePtrType = false;
2931   if (!TLI.isTypeLegal(VT)) {
2932     UsePtrType = true;
2933   } else {
2934     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2935       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2936         // Switch table case ranges are encoded into a series of masks.
2937         // Just use pointer type, it's guaranteed to fit.
2938         UsePtrType = true;
2939         break;
2940       }
2941   }
2942   SDValue Sub = RangeSub;
2943   if (UsePtrType) {
2944     VT = TLI.getPointerTy(DAG.getDataLayout());
2945     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2946   }
2947 
2948   B.RegVT = VT.getSimpleVT();
2949   B.Reg = FuncInfo.CreateReg(B.RegVT);
2950   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2951 
2952   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2953 
2954   if (!B.FallthroughUnreachable)
2955     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2956   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2957   SwitchBB->normalizeSuccProbs();
2958 
2959   SDValue Root = CopyTo;
2960   if (!B.FallthroughUnreachable) {
2961     // Conditional branch to the default block.
2962     SDValue RangeCmp = DAG.getSetCC(dl,
2963         TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2964                                RangeSub.getValueType()),
2965         RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
2966         ISD::SETUGT);
2967 
2968     Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
2969                        DAG.getBasicBlock(B.Default));
2970   }
2971 
2972   // Avoid emitting unnecessary branches to the next block.
2973   if (MBB != NextBlock(SwitchBB))
2974     Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
2975 
2976   DAG.setRoot(Root);
2977 }
2978 
2979 /// visitBitTestCase - This function produces one "bit test".
2980 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2981                                            MachineBasicBlock* NextMBB,
2982                                            BranchProbability BranchProbToNext,
2983                                            unsigned Reg,
2984                                            BitTestCase &B,
2985                                            MachineBasicBlock *SwitchBB) {
2986   SDLoc dl = getCurSDLoc();
2987   MVT VT = BB.RegVT;
2988   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2989   SDValue Cmp;
2990   unsigned PopCount = llvm::popcount(B.Mask);
2991   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2992   if (PopCount == 1) {
2993     // Testing for a single bit; just compare the shift count with what it
2994     // would need to be to shift a 1 bit in that position.
2995     Cmp = DAG.getSetCC(
2996         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2997         ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
2998         ISD::SETEQ);
2999   } else if (PopCount == BB.Range) {
3000     // There is only one zero bit in the range, test for it directly.
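    // For example, with Range = 3 and Mask = 0b1011, the only in-range value
    // that is not a case is 2 (the lowest clear bit in the mask), so branch to
    // the target whenever the shift amount differs from countr_one(Mask).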
3001     Cmp = DAG.getSetCC(
3002         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3003         ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
3004   } else {
3005     // Make desired shift
3006     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
3007                                     DAG.getConstant(1, dl, VT), ShiftOp);
3008 
3009     // Emit bit tests and jumps
3010     SDValue AndOp = DAG.getNode(ISD::AND, dl,
3011                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
3012     Cmp = DAG.getSetCC(
3013         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3014         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
3015   }
3016 
3017   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
3018   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
3019   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
3020   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3021   // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
3022   // one as they are relative probabilities (and thus work more like weights),
3023   // and hence we need to normalize them so that they sum to one.
3024   SwitchBB->normalizeSuccProbs();
3025 
3026   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
3027                               MVT::Other, getControlRoot(),
3028                               Cmp, DAG.getBasicBlock(B.TargetBB));
3029 
3030   // Avoid emitting unnecessary branches to the next block.
3031   if (NextMBB != NextBlock(SwitchBB))
3032     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3033                         DAG.getBasicBlock(NextMBB));
3034 
3035   DAG.setRoot(BrAnd);
3036 }
3037 
3038 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
3039   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
3040 
3041   // Retrieve successors. Look through artificial IR level blocks like
3042   // catchswitch for successors.
3043   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
3044   const BasicBlock *EHPadBB = I.getSuccessor(1);
3045   MachineBasicBlock *EHPadMBB = FuncInfo.MBBMap[EHPadBB];
3046 
3047   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3048   // have to do anything here to lower funclet bundles.
3049   assert(!I.hasOperandBundlesOtherThan(
3050              {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3051               LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3052               LLVMContext::OB_cfguardtarget,
3053               LLVMContext::OB_clang_arc_attachedcall}) &&
3054          "Cannot lower invokes with arbitrary operand bundles yet!");
3055 
3056   const Value *Callee(I.getCalledOperand());
3057   const Function *Fn = dyn_cast<Function>(Callee);
3058   if (isa<InlineAsm>(Callee))
3059     visitInlineAsm(I, EHPadBB);
3060   else if (Fn && Fn->isIntrinsic()) {
3061     switch (Fn->getIntrinsicID()) {
3062     default:
3063       llvm_unreachable("Cannot invoke this intrinsic");
3064     case Intrinsic::donothing:
3065       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
3066     case Intrinsic::seh_try_begin:
3067     case Intrinsic::seh_scope_begin:
3068     case Intrinsic::seh_try_end:
3069     case Intrinsic::seh_scope_end:
3070       if (EHPadMBB)
3071           // The EH pad is referenced by the EH table; mark its address as
3072           // taken so optimizations do not remove the dtor funclet.
3073           EHPadMBB->setMachineBlockAddressTaken();
3074       break;
3075     case Intrinsic::experimental_patchpoint_void:
3076     case Intrinsic::experimental_patchpoint_i64:
3077       visitPatchpoint(I, EHPadBB);
3078       break;
3079     case Intrinsic::experimental_gc_statepoint:
3080       LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
3081       break;
3082     case Intrinsic::wasm_rethrow: {
3083       // This is usually done in visitTargetIntrinsic, but this intrinsic is
3084       // special because it can be invoked, so we manually lower it to a DAG
3085       // node here.
3086       SmallVector<SDValue, 8> Ops;
3087       Ops.push_back(getRoot()); // inchain
3088       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3089       Ops.push_back(
3090           DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
3091                                 TLI.getPointerTy(DAG.getDataLayout())));
3092       SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
3093       DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
3094       break;
3095     }
3096     }
3097   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
3098     // Currently we do not lower any intrinsic calls with deopt operand bundles.
3099     // Eventually we will support lowering the @llvm.experimental.deoptimize
3100     // intrinsic, and right now there are no plans to support other intrinsics
3101     // with deopt state.
3102     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
3103   } else {
3104     LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
3105   }
3106 
3107   // If the value of the invoke is used outside of its defining block, make it
3108   // available as a virtual register.
3109   // We already took care of the exported value for the statepoint instruction
3110   // during call to the LowerStatepoint.
3111   if (!isa<GCStatepointInst>(I)) {
3112     CopyToExportRegsIfNeeded(&I);
3113   }
3114 
3115   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3116   BranchProbabilityInfo *BPI = FuncInfo.BPI;
3117   BranchProbability EHPadBBProb =
3118       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3119           : BranchProbability::getZero();
3120   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
3121 
3122   // Update successor info.
3123   addSuccessorWithProb(InvokeMBB, Return);
3124   for (auto &UnwindDest : UnwindDests) {
3125     UnwindDest.first->setIsEHPad();
3126     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3127   }
3128   InvokeMBB->normalizeSuccProbs();
3129 
3130   // Drop into normal successor.
3131   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
3132                           DAG.getBasicBlock(Return)));
3133 }
3134 
3135 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
3136   MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
3137 
3138   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3139   // have to do anything here to lower funclet bundles.
3140   assert(!I.hasOperandBundlesOtherThan(
3141              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3142          "Cannot lower callbrs with arbitrary operand bundles yet!");
3143 
3144   assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
3145   visitInlineAsm(I);
3146   CopyToExportRegsIfNeeded(&I);
3147 
3148   // Retrieve successors.
3149   SmallPtrSet<BasicBlock *, 8> Dests;
3150   Dests.insert(I.getDefaultDest());
3151   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
3152 
3153   // Update successor info.
3154   addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
3155   for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
3156     BasicBlock *Dest = I.getIndirectDest(i);
3157     MachineBasicBlock *Target = FuncInfo.MBBMap[Dest];
3158     Target->setIsInlineAsmBrIndirectTarget();
3159     Target->setMachineBlockAddressTaken();
3160     Target->setLabelMustBeEmitted();
3161     // Don't add duplicate machine successors.
3162     if (Dests.insert(Dest).second)
3163       addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
3164   }
3165   CallBrMBB->normalizeSuccProbs();
3166 
3167   // Drop into default successor.
3168   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
3169                           MVT::Other, getControlRoot(),
3170                           DAG.getBasicBlock(Return)));
3171 }
3172 
3173 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
3174   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
3175 }
3176 
3177 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3178   assert(FuncInfo.MBB->isEHPad() &&
3179          "Call to landingpad not in landing pad!");
3180 
3181   // If there aren't registers to copy the values into (e.g., during SjLj
3182   // exceptions), then don't bother to create these DAG nodes.
3183   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3184   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3185   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3186       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3187     return;
3188 
3189   // If the landingpad's return type is a token, we don't create DAG nodes
3190   // for its exception pointer and selector value. The extraction of exception
3191   // pointer or selector value from token type landingpads is not currently
3192   // supported.
3193   if (LP.getType()->isTokenTy())
3194     return;
3195 
3196   SmallVector<EVT, 2> ValueVTs;
3197   SDLoc dl = getCurSDLoc();
3198   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3199   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3200 
3201   // Get the two live-in registers as SDValues. The physregs have already been
3202   // copied into virtual registers.
3203   SDValue Ops[2];
3204   if (FuncInfo.ExceptionPointerVirtReg) {
3205     Ops[0] = DAG.getZExtOrTrunc(
3206         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3207                            FuncInfo.ExceptionPointerVirtReg,
3208                            TLI.getPointerTy(DAG.getDataLayout())),
3209         dl, ValueVTs[0]);
3210   } else {
3211     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3212   }
3213   Ops[1] = DAG.getZExtOrTrunc(
3214       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3215                          FuncInfo.ExceptionSelectorVirtReg,
3216                          TLI.getPointerTy(DAG.getDataLayout())),
3217       dl, ValueVTs[1]);
3218 
3219   // Merge into one.
3220   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3221                             DAG.getVTList(ValueVTs), Ops);
3222   setValue(&LP, Res);
3223 }
3224 
3225 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
3226                                            MachineBasicBlock *Last) {
3227   // Update JTCases.
3228   for (JumpTableBlock &JTB : SL->JTCases)
3229     if (JTB.first.HeaderBB == First)
3230       JTB.first.HeaderBB = Last;
3231 
3232   // Update BitTestCases.
3233   for (BitTestBlock &BTB : SL->BitTestCases)
3234     if (BTB.Parent == First)
3235       BTB.Parent = Last;
3236 }
3237 
3238 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3239   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3240 
3241   // Update machine-CFG edges with unique successors.
3242   SmallSet<BasicBlock*, 32> Done;
3243   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3244     BasicBlock *BB = I.getSuccessor(i);
3245     bool Inserted = Done.insert(BB).second;
3246     if (!Inserted)
3247       continue;
3248 
3249     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
3250     addSuccessorWithProb(IndirectBrMBB, Succ);
3251   }
3252   IndirectBrMBB->normalizeSuccProbs();
3253 
3254   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
3255                           MVT::Other, getControlRoot(),
3256                           getValue(I.getAddress())));
3257 }
3258 
3259 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3260   if (!DAG.getTarget().Options.TrapUnreachable)
3261     return;
3262 
3263   // We may be able to ignore unreachable behind a noreturn call.
3264   if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
3265     if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode())) {
3266       if (Call->doesNotReturn())
3267         return;
3268     }
3269   }
3270 
3271   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3272 }
3273 
3274 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3275   SDNodeFlags Flags;
3276   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3277     Flags.copyFMF(*FPOp);
3278 
3279   SDValue Op = getValue(I.getOperand(0));
3280   SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3281                                     Op, Flags);
3282   setValue(&I, UnNodeValue);
3283 }
3284 
3285 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3286   SDNodeFlags Flags;
3287   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3288     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3289     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3290   }
3291   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3292     Flags.setExact(ExactOp->isExact());
3293   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3294     Flags.copyFMF(*FPOp);
3295 
3296   SDValue Op1 = getValue(I.getOperand(0));
3297   SDValue Op2 = getValue(I.getOperand(1));
3298   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3299                                      Op1, Op2, Flags);
3300   setValue(&I, BinNodeValue);
3301 }
3302 
3303 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3304   SDValue Op1 = getValue(I.getOperand(0));
3305   SDValue Op2 = getValue(I.getOperand(1));
3306 
3307   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3308       Op1.getValueType(), DAG.getDataLayout());
3309 
3310   // Coerce the shift amount to the right type if we can. This exposes the
3311   // truncate or zext to optimization early.
3312   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3313     assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) &&
3314            "Unexpected shift type");
3315     Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
3316   }
3317 
3318   bool nuw = false;
3319   bool nsw = false;
3320   bool exact = false;
3321 
3322   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3323 
3324     if (const OverflowingBinaryOperator *OFBinOp =
3325             dyn_cast<const OverflowingBinaryOperator>(&I)) {
3326       nuw = OFBinOp->hasNoUnsignedWrap();
3327       nsw = OFBinOp->hasNoSignedWrap();
3328     }
3329     if (const PossiblyExactOperator *ExactOp =
3330             dyn_cast<const PossiblyExactOperator>(&I))
3331       exact = ExactOp->isExact();
3332   }
3333   SDNodeFlags Flags;
3334   Flags.setExact(exact);
3335   Flags.setNoSignedWrap(nsw);
3336   Flags.setNoUnsignedWrap(nuw);
3337   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3338                             Flags);
3339   setValue(&I, Res);
3340 }
3341 
3342 void SelectionDAGBuilder::visitSDiv(const User &I) {
3343   SDValue Op1 = getValue(I.getOperand(0));
3344   SDValue Op2 = getValue(I.getOperand(1));
3345 
3346   SDNodeFlags Flags;
3347   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3348                  cast<PossiblyExactOperator>(&I)->isExact());
3349   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3350                            Op2, Flags));
3351 }
3352 
3353 void SelectionDAGBuilder::visitICmp(const User &I) {
3354   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3355   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3356     predicate = IC->getPredicate();
3357   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3358     predicate = ICmpInst::Predicate(IC->getPredicate());
3359   SDValue Op1 = getValue(I.getOperand(0));
3360   SDValue Op2 = getValue(I.getOperand(1));
3361   ISD::CondCode Opcode = getICmpCondCode(predicate);
3362 
3363   auto &TLI = DAG.getTargetLoweringInfo();
3364   EVT MemVT =
3365       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3366 
3367   // If a pointer's DAG type is larger than its memory type then the DAG values
3368   // are zero-extended. This breaks signed comparisons so truncate back to the
3369   // underlying type before doing the compare.
3370   if (Op1.getValueType() != MemVT) {
3371     Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3372     Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3373   }
3374 
3375   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3376                                                         I.getType());
3377   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3378 }
3379 
3380 void SelectionDAGBuilder::visitFCmp(const User &I) {
3381   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3382   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3383     predicate = FC->getPredicate();
3384   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3385     predicate = FCmpInst::Predicate(FC->getPredicate());
3386   SDValue Op1 = getValue(I.getOperand(0));
3387   SDValue Op2 = getValue(I.getOperand(1));
3388 
3389   ISD::CondCode Condition = getFCmpCondCode(predicate);
3390   auto *FPMO = cast<FPMathOperator>(&I);
3391   if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3392     Condition = getFCmpCodeWithoutNaN(Condition);
3393 
3394   SDNodeFlags Flags;
3395   Flags.copyFMF(*FPMO);
3396   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3397 
3398   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3399                                                         I.getType());
3400   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3401 }
3402 
3403 // Check if the condition of the select is used only by select instructions,
3404 // so converting to a min/max does not keep the compare alive for other users.
3405 static bool hasOnlySelectUsers(const Value *Cond) {
3406   return llvm::all_of(Cond->users(), [](const Value *V) {
3407     return isa<SelectInst>(V);
3408   });
3409 }
3410 
3411 void SelectionDAGBuilder::visitSelect(const User &I) {
3412   SmallVector<EVT, 4> ValueVTs;
3413   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3414                   ValueVTs);
3415   unsigned NumValues = ValueVTs.size();
3416   if (NumValues == 0) return;
3417 
3418   SmallVector<SDValue, 4> Values(NumValues);
3419   SDValue Cond     = getValue(I.getOperand(0));
3420   SDValue LHSVal   = getValue(I.getOperand(1));
3421   SDValue RHSVal   = getValue(I.getOperand(2));
3422   SmallVector<SDValue, 1> BaseOps(1, Cond);
3423   ISD::NodeType OpCode =
3424       Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3425 
3426   bool IsUnaryAbs = false;
3427   bool Negate = false;
3428 
3429   SDNodeFlags Flags;
3430   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3431     Flags.copyFMF(*FPOp);
3432 
3433   Flags.setUnpredictable(
3434       cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));
3435 
3436   // Min/max matching is only viable if all output VTs are the same.
3437   if (all_equal(ValueVTs)) {
3438     EVT VT = ValueVTs[0];
3439     LLVMContext &Ctx = *DAG.getContext();
3440     auto &TLI = DAG.getTargetLoweringInfo();
3441 
3442     // We care about the legality of the operation after it has been type
3443     // legalized.
3444     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3445       VT = TLI.getTypeToTransformTo(Ctx, VT);
3446 
3447     // If the vselect is legal, assume we want to leave this as a vector setcc +
3448     // vselect. Otherwise, if this is going to be scalarized, we want to see if
3449     // min/max is legal on the scalar type.
3450     bool UseScalarMinMax = VT.isVector() &&
3451       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3452 
3453     // ValueTracking's select pattern matching does not account for -0.0,
3454     // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that
3455     // -0.0 is less than +0.0.
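    // For example, "select (icmp sgt X, Y), X, Y" is matched as SPF_SMAX below
    // and lowered to ISD::SMAX when that operation is legal for the value type.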
3456     Value *LHS, *RHS;
3457     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3458     ISD::NodeType Opc = ISD::DELETED_NODE;
3459     switch (SPR.Flavor) {
3460     case SPF_UMAX:    Opc = ISD::UMAX; break;
3461     case SPF_UMIN:    Opc = ISD::UMIN; break;
3462     case SPF_SMAX:    Opc = ISD::SMAX; break;
3463     case SPF_SMIN:    Opc = ISD::SMIN; break;
3464     case SPF_FMINNUM:
3465       switch (SPR.NaNBehavior) {
3466       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3467       case SPNB_RETURNS_NAN: break;
3468       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3469       case SPNB_RETURNS_ANY:
3470         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT) ||
3471             (UseScalarMinMax &&
3472              TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType())))
3473           Opc = ISD::FMINNUM;
3474         break;
3475       }
3476       break;
3477     case SPF_FMAXNUM:
3478       switch (SPR.NaNBehavior) {
3479       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3480       case SPNB_RETURNS_NAN: break;
3481       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3482       case SPNB_RETURNS_ANY:
3483         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT) ||
3484             (UseScalarMinMax &&
3485              TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType())))
3486           Opc = ISD::FMAXNUM;
3487         break;
3488       }
3489       break;
3490     case SPF_NABS:
3491       Negate = true;
3492       [[fallthrough]];
3493     case SPF_ABS:
3494       IsUnaryAbs = true;
3495       Opc = ISD::ABS;
3496       break;
3497     default: break;
3498     }
3499 
3500     if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3501         (TLI.isOperationLegalOrCustomOrPromote(Opc, VT) ||
3502          (UseScalarMinMax &&
3503           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3504         // If the underlying comparison instruction is used by any other
3505         // instruction, the consumed instructions won't be destroyed, so it is
3506         // not profitable to convert to a min/max.
3507         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3508       OpCode = Opc;
3509       LHSVal = getValue(LHS);
3510       RHSVal = getValue(RHS);
3511       BaseOps.clear();
3512     }
3513 
3514     if (IsUnaryAbs) {
3515       OpCode = Opc;
3516       LHSVal = getValue(LHS);
3517       BaseOps.clear();
3518     }
3519   }
3520 
3521   if (IsUnaryAbs) {
3522     for (unsigned i = 0; i != NumValues; ++i) {
3523       SDLoc dl = getCurSDLoc();
3524       EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3525       Values[i] =
3526           DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3527       if (Negate)
3528         Values[i] = DAG.getNegative(Values[i], dl, VT);
3529     }
3530   } else {
3531     for (unsigned i = 0; i != NumValues; ++i) {
3532       SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3533       Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3534       Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3535       Values[i] = DAG.getNode(
3536           OpCode, getCurSDLoc(),
3537           LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3538     }
3539   }
3540 
3541   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3542                            DAG.getVTList(ValueVTs), Values));
3543 }
3544 
3545 void SelectionDAGBuilder::visitTrunc(const User &I) {
3546   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3547   SDValue N = getValue(I.getOperand(0));
3548   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3549                                                         I.getType());
3550   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3551 }
3552 
3553 void SelectionDAGBuilder::visitZExt(const User &I) {
3554   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3555   // ZExt also can't be a cast to bool for the same reason; nothing much to do.
3556   SDValue N = getValue(I.getOperand(0));
3557   auto &TLI = DAG.getTargetLoweringInfo();
3558   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3559 
3560   SDNodeFlags Flags;
3561   if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3562     Flags.setNonNeg(PNI->hasNonNeg());
3563 
3564   // Eagerly use nonneg information to canonicalize towards sign_extend if
3565   // that is the target's preference.
3566   // TODO: Let the target do this later.
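  // For example, a "zext nneg" from i32 to i64 is emitted as ISD::SIGN_EXTEND
  // on targets such as RV64, where isSExtCheaperThanZExt returns true for that
  // extension.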
3567   if (Flags.hasNonNeg() &&
3568       TLI.isSExtCheaperThanZExt(N.getValueType(), DestVT)) {
3569     setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3570     return;
3571   }
3572 
3573   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N, Flags));
3574 }
3575 
3576 void SelectionDAGBuilder::visitSExt(const User &I) {
3577   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3578   // SExt also can't be a cast to bool for the same reason; nothing much to do.
3579   SDValue N = getValue(I.getOperand(0));
3580   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3581                                                         I.getType());
3582   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3583 }
3584 
3585 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3586   // FPTrunc is never a no-op cast, no need to check
3587   SDValue N = getValue(I.getOperand(0));
3588   SDLoc dl = getCurSDLoc();
3589   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3590   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3591   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3592                            DAG.getTargetConstant(
3593                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3594 }
3595 
3596 void SelectionDAGBuilder::visitFPExt(const User &I) {
3597   // FPExt is never a no-op cast, no need to check
3598   SDValue N = getValue(I.getOperand(0));
3599   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3600                                                         I.getType());
3601   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3602 }
3603 
3604 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3605   // FPToUI is never a no-op cast, no need to check
3606   SDValue N = getValue(I.getOperand(0));
3607   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3608                                                         I.getType());
3609   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3610 }
3611 
3612 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3613   // FPToSI is never a no-op cast, no need to check
3614   SDValue N = getValue(I.getOperand(0));
3615   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3616                                                         I.getType());
3617   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3618 }
3619 
3620 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3621   // UIToFP is never a no-op cast, no need to check
3622   SDValue N = getValue(I.getOperand(0));
3623   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3624                                                         I.getType());
3625   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3626 }
3627 
3628 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3629   // SIToFP is never a no-op cast, no need to check
3630   SDValue N = getValue(I.getOperand(0));
3631   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3632                                                         I.getType());
3633   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3634 }
3635 
3636 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3637   // What to do depends on the size of the integer and the size of the pointer.
3638   // We can either truncate, zero extend, or no-op, accordingly.
3639   SDValue N = getValue(I.getOperand(0));
3640   auto &TLI = DAG.getTargetLoweringInfo();
3641   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3642                                                         I.getType());
3643   EVT PtrMemVT =
3644       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3645   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3646   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3647   setValue(&I, N);
3648 }
3649 
3650 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3651   // What to do depends on the size of the integer and the size of the pointer.
3652   // We can either truncate, zero extend, or no-op, accordingly.
3653   SDValue N = getValue(I.getOperand(0));
3654   auto &TLI = DAG.getTargetLoweringInfo();
3655   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3656   EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3657   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3658   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3659   setValue(&I, N);
3660 }
3661 
3662 void SelectionDAGBuilder::visitBitCast(const User &I) {
3663   SDValue N = getValue(I.getOperand(0));
3664   SDLoc dl = getCurSDLoc();
3665   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3666                                                         I.getType());
3667 
3668   // BitCast assures us that source and destination are the same size, so this is
3669   // either a BITCAST or a no-op.
3670   if (DestVT != N.getValueType())
3671     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3672                              DestVT, N)); // convert types.
3673   // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3674   // might fold any kind of constant expression to an integer constant and that
3675   // is not what we are looking for. Only recognize a bitcast of a genuine
3676   // constant integer as an opaque constant.
3677   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3678     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3679                                  /*isOpaque*/true));
3680   else
3681     setValue(&I, N);            // noop cast.
3682 }
3683 
3684 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3685   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3686   const Value *SV = I.getOperand(0);
3687   SDValue N = getValue(SV);
3688   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3689 
3690   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3691   unsigned DestAS = I.getType()->getPointerAddressSpace();
3692 
3693   if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
3694     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3695 
3696   setValue(&I, N);
3697 }
3698 
3699 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3700   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3701   SDValue InVec = getValue(I.getOperand(0));
3702   SDValue InVal = getValue(I.getOperand(1));
3703   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3704                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3705   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3706                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3707                            InVec, InVal, InIdx));
3708 }
3709 
3710 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3711   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3712   SDValue InVec = getValue(I.getOperand(0));
3713   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3714                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3715   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3716                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3717                            InVec, InIdx));
3718 }
3719 
3720 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3721   SDValue Src1 = getValue(I.getOperand(0));
3722   SDValue Src2 = getValue(I.getOperand(1));
3723   ArrayRef<int> Mask;
3724   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
3725     Mask = SVI->getShuffleMask();
3726   else
3727     Mask = cast<ConstantExpr>(I).getShuffleMask();
3728   SDLoc DL = getCurSDLoc();
3729   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3730   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3731   EVT SrcVT = Src1.getValueType();
3732 
3733   if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
3734       VT.isScalableVector()) {
3735     // Canonical splat form of first element of first input vector.
3736     SDValue FirstElt =
3737         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
3738                     DAG.getVectorIdxConstant(0, DL));
3739     setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
3740     return;
3741   }
3742 
3743   // For now, we only handle splats for scalable vectors.
3744   // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
3745   // for targets that support a SPLAT_VECTOR for non-scalable vector types.
3746   assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
3747 
3748   unsigned SrcNumElts = SrcVT.getVectorNumElements();
3749   unsigned MaskNumElts = Mask.size();
3750 
3751   if (SrcNumElts == MaskNumElts) {
3752     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3753     return;
3754   }
3755 
3756   // Normalize the shuffle vector since mask and vector length don't match.
3757   if (SrcNumElts < MaskNumElts) {
3758     // The mask is longer than the source vectors. We can use CONCAT_VECTORS to
3759     // make the mask and vector lengths match.
3760 
3761     if (MaskNumElts % SrcNumElts == 0) {
3762       // Mask length is a multiple of the source vector length.
3763       // Check if the shuffle is some kind of concatenation of the input
3764       // vectors.
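           // For example, with <4 x i32> sources, the mask <0,1,2,3,4,5,6,7>
           // selects all of Src1 followed by all of Src2 and can be lowered to
           // a single CONCAT_VECTORS node.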
3765       unsigned NumConcat = MaskNumElts / SrcNumElts;
3766       bool IsConcat = true;
3767       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3768       for (unsigned i = 0; i != MaskNumElts; ++i) {
3769         int Idx = Mask[i];
3770         if (Idx < 0)
3771           continue;
3772         // Ensure the indices in each SrcVT sized piece are sequential and that
3773         // the same source is used for the whole piece.
3774         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3775             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3776              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3777           IsConcat = false;
3778           break;
3779         }
3780         // Remember which source this index came from.
3781         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3782       }
3783 
3784       // The shuffle is concatenating multiple vectors together. Just emit
3785       // a CONCAT_VECTORS operation.
3786       if (IsConcat) {
3787         SmallVector<SDValue, 8> ConcatOps;
3788         for (auto Src : ConcatSrcs) {
3789           if (Src < 0)
3790             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3791           else if (Src == 0)
3792             ConcatOps.push_back(Src1);
3793           else
3794             ConcatOps.push_back(Src2);
3795         }
3796         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3797         return;
3798       }
3799     }
3800 
3801     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3802     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3803     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3804                                     PaddedMaskNumElts);
3805 
3806     // Pad both vectors with undefs to make them the same length as the mask.
3807     SDValue UndefVal = DAG.getUNDEF(SrcVT);
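         // For example, <3 x i32> sources with a 5-element mask are padded to
         // <6 x i32>, shuffled, and the low 5 elements are extracted below.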
3808 
3809     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3810     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3811     MOps1[0] = Src1;
3812     MOps2[0] = Src2;
3813 
3814     Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3815     Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3816 
3817     // Readjust mask for new input vector length.
3818     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3819     for (unsigned i = 0; i != MaskNumElts; ++i) {
3820       int Idx = Mask[i];
3821       if (Idx >= (int)SrcNumElts)
3822         Idx -= SrcNumElts - PaddedMaskNumElts;
3823       MappedOps[i] = Idx;
3824     }
3825 
3826     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3827 
3828     // If the concatenated vector was padded, extract a subvector with the
3829     // correct number of elements.
3830     if (MaskNumElts != PaddedMaskNumElts)
3831       Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3832                            DAG.getVectorIdxConstant(0, DL));
3833 
3834     setValue(&I, Result);
3835     return;
3836   }
3837 
3838   if (SrcNumElts > MaskNumElts) {
3839     // Analyze the access pattern of the vector to see if we can extract
3840     // two subvectors and do the shuffle.
3841     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3842     bool CanExtract = true;
3843     for (int Idx : Mask) {
3844       unsigned Input = 0;
3845       if (Idx < 0)
3846         continue;
3847 
3848       if (Idx >= (int)SrcNumElts) {
3849         Input = 1;
3850         Idx -= SrcNumElts;
3851       }
3852 
3853       // If all the indices come from the same MaskNumElts-sized portion of
3854       // the sources we can use EXTRACT_SUBVECTOR. Also make sure the extract
3855       // wouldn't read past the end of the source.
3856       int NewStartIdx = alignDown(Idx, MaskNumElts);
3857       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3858           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3859         CanExtract = false;
3860       // Make sure we always update StartIdx as we use it to track if all
3861       // elements are undef.
3862       StartIdx[Input] = NewStartIdx;
3863     }
3864 
3865     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3866       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3867       return;
3868     }
3869     if (CanExtract) {
3870       // Extract appropriate subvector and generate a vector shuffle
3871       for (unsigned Input = 0; Input < 2; ++Input) {
3872         SDValue &Src = Input == 0 ? Src1 : Src2;
3873         if (StartIdx[Input] < 0)
3874           Src = DAG.getUNDEF(VT);
3875         else {
3876           Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3877                             DAG.getVectorIdxConstant(StartIdx[Input], DL));
3878         }
3879       }
3880 
3881       // Calculate new mask.
3882       SmallVector<int, 8> MappedOps(Mask);
3883       for (int &Idx : MappedOps) {
3884         if (Idx >= (int)SrcNumElts)
3885           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3886         else if (Idx >= 0)
3887           Idx -= StartIdx[0];
3888       }
3889 
3890       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3891       return;
3892     }
3893   }
3894 
3895   // We can't use either concat vectors or extract subvectors so fall back to
3896   // replacing the shuffle by extracting every element and rebuilding the
3897   // result with BUILD_VECTOR.
3898   EVT EltVT = VT.getVectorElementType();
3899   SmallVector<SDValue,8> Ops;
3900   for (int Idx : Mask) {
3901     SDValue Res;
3902 
3903     if (Idx < 0) {
3904       Res = DAG.getUNDEF(EltVT);
3905     } else {
3906       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3907       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3908 
3909       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
3910                         DAG.getVectorIdxConstant(Idx, DL));
3911     }
3912 
3913     Ops.push_back(Res);
3914   }
3915 
3916   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3917 }
3918 
3919 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
3920   ArrayRef<unsigned> Indices = I.getIndices();
3921   const Value *Op0 = I.getOperand(0);
3922   const Value *Op1 = I.getOperand(1);
3923   Type *AggTy = I.getType();
3924   Type *ValTy = Op1->getType();
3925   bool IntoUndef = isa<UndefValue>(Op0);
3926   bool FromUndef = isa<UndefValue>(Op1);
3927 
3928   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
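       // LinearIndex is the position of the inserted value within the flattened
       // list of scalar values that make up the aggregate.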
3929 
3930   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3931   SmallVector<EVT, 4> AggValueVTs;
3932   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3933   SmallVector<EVT, 4> ValValueVTs;
3934   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3935 
3936   unsigned NumAggValues = AggValueVTs.size();
3937   unsigned NumValValues = ValValueVTs.size();
3938   SmallVector<SDValue, 4> Values(NumAggValues);
3939 
3940   // Ignore an insertvalue that produces an empty object
3941   if (!NumAggValues) {
3942     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3943     return;
3944   }
3945 
3946   SDValue Agg = getValue(Op0);
3947   unsigned i = 0;
3948   // Copy the beginning value(s) from the original aggregate.
3949   for (; i != LinearIndex; ++i)
3950     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3951                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3952   // Copy values from the inserted value(s).
3953   if (NumValValues) {
3954     SDValue Val = getValue(Op1);
3955     for (; i != LinearIndex + NumValValues; ++i)
3956       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3957                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3958   }
3959   // Copy remaining value(s) from the original aggregate.
3960   for (; i != NumAggValues; ++i)
3961     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3962                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3963 
3964   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3965                            DAG.getVTList(AggValueVTs), Values));
3966 }
3967 
3968 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
3969   ArrayRef<unsigned> Indices = I.getIndices();
3970   const Value *Op0 = I.getOperand(0);
3971   Type *AggTy = Op0->getType();
3972   Type *ValTy = I.getType();
3973   bool OutOfUndef = isa<UndefValue>(Op0);
3974 
3975   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3976 
3977   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3978   SmallVector<EVT, 4> ValValueVTs;
3979   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3980 
3981   unsigned NumValValues = ValValueVTs.size();
3982 
3983   // Ignore an extractvalue that produces an empty object
3984   if (!NumValValues) {
3985     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3986     return;
3987   }
3988 
3989   SmallVector<SDValue, 4> Values(NumValValues);
3990 
3991   SDValue Agg = getValue(Op0);
3992   // Copy out the selected value(s).
3993   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3994     Values[i - LinearIndex] =
3995       OutOfUndef ?
3996         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3997         SDValue(Agg.getNode(), Agg.getResNo() + i);
3998 
3999   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4000                            DAG.getVTList(ValValueVTs), Values));
4001 }
4002 
4003 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
4004   Value *Op0 = I.getOperand(0);
4005   // Note that the pointer operand may be a vector of pointers. Take the scalar
4006   // element which holds a pointer.
4007   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
4008   SDValue N = getValue(Op0);
4009   SDLoc dl = getCurSDLoc();
4010   auto &TLI = DAG.getTargetLoweringInfo();
4011 
4012   // Normalize a vector GEP: all scalar operands should be converted to
4013   // splat vectors.
4014   bool IsVectorGEP = I.getType()->isVectorTy();
4015   ElementCount VectorElementCount =
4016       IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
4017                   : ElementCount::getFixed(0);
4018 
4019   if (IsVectorGEP && !N.getValueType().isVector()) {
4020     LLVMContext &Context = *DAG.getContext();
4021     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
4022     N = DAG.getSplat(VT, dl, N);
4023   }
4024 
4025   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
4026        GTI != E; ++GTI) {
4027     const Value *Idx = GTI.getOperand();
4028     if (StructType *StTy = GTI.getStructTypeOrNull()) {
4029       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
4030       if (Field) {
4031         // N = N + Offset
4032         uint64_t Offset =
4033             DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
4034 
4035         // In an inbounds GEP with an offset that is nonnegative even when
4036         // interpreted as signed, assume there is no unsigned overflow.
4037         SDNodeFlags Flags;
4038         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
4039           Flags.setNoUnsignedWrap(true);
4040 
4041         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
4042                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
4043       }
4044     } else {
4045       // IdxSize is the width of the arithmetic according to IR semantics.
4046       // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4047       // (and fix up the result later).
4048       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4049       MVT IdxTy = MVT::getIntegerVT(IdxSize);
4050       TypeSize ElementSize =
4051           DAG.getDataLayout().getTypeAllocSize(GTI.getIndexedType());
4052       // We intentionally mask away the high bits here; ElementSize may not
4053       // fit in IdxTy.
4054       APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
4055       bool ElementScalable = ElementSize.isScalable();
4056 
4057       // If this is a scalar constant or a splat vector of constants,
4058       // handle it quickly.
4059       const auto *C = dyn_cast<Constant>(Idx);
4060       if (C && isa<VectorType>(C->getType()))
4061         C = C->getSplatValue();
4062 
4063       const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4064       if (CI && CI->isZero())
4065         continue;
4066       if (CI && !ElementScalable) {
4067         APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4068         LLVMContext &Context = *DAG.getContext();
4069         SDValue OffsVal;
4070         if (IsVectorGEP)
4071           OffsVal = DAG.getConstant(
4072               Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4073         else
4074           OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4075 
4076         // In an inbounds GEP with an offset that is nonnegative even when
4077         // interpreted as signed, assume there is no unsigned overflow.
4078         SDNodeFlags Flags;
4079         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
4080           Flags.setNoUnsignedWrap(true);
4081 
4082         OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4083 
4084         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
4085         continue;
4086       }
4087 
4088       // N = N + Idx * ElementMul;
4089       SDValue IdxN = getValue(Idx);
4090 
4091       if (!IdxN.getValueType().isVector() && IsVectorGEP) {
4092         EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(),
4093                                   VectorElementCount);
4094         IdxN = DAG.getSplat(VT, dl, IdxN);
4095       }
4096 
4097       // If the index is smaller or larger than intptr_t, truncate or extend
4098       // it.
4099       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4100 
4101       if (ElementScalable) {
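             // For scalable element types the allocation size is only known as a
             // multiple of vscale, so scale the index by VSCALE * KnownMinSize
             // instead of a plain constant.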
4102         EVT VScaleTy = N.getValueType().getScalarType();
4103         SDValue VScale = DAG.getNode(
4104             ISD::VSCALE, dl, VScaleTy,
4105             DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4106         if (IsVectorGEP)
4107           VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4108         IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
4109       } else {
4110         // If this is a multiply by a power of two, turn it into a shl
4111         // immediately.  This is a very common case.
4112         if (ElementMul != 1) {
4113           if (ElementMul.isPowerOf2()) {
4114             unsigned Amt = ElementMul.logBase2();
4115             IdxN = DAG.getNode(ISD::SHL, dl,
4116                                N.getValueType(), IdxN,
4117                                DAG.getConstant(Amt, dl, IdxN.getValueType()));
4118           } else {
4119             SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
4120                                             IdxN.getValueType());
4121             IdxN = DAG.getNode(ISD::MUL, dl,
4122                                N.getValueType(), IdxN, Scale);
4123           }
4124         }
4125       }
4126 
4127       N = DAG.getNode(ISD::ADD, dl,
4128                       N.getValueType(), N, IdxN);
4129     }
4130   }
4131 
4132   MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
4133   MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
4134   if (IsVectorGEP) {
4135     PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
4136     PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
4137   }
4138 
4139   if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4140     N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4141 
4142   setValue(&I, N);
4143 }
4144 
4145 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4146   // If this is a fixed sized alloca in the entry block of the function,
4147   // allocate it statically on the stack.
4148   if (FuncInfo.StaticAllocaMap.count(&I))
4149     return;   // getValue will auto-populate this.
4150 
4151   SDLoc dl = getCurSDLoc();
4152   Type *Ty = I.getAllocatedType();
4153   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4154   auto &DL = DAG.getDataLayout();
4155   TypeSize TySize = DL.getTypeAllocSize(Ty);
4156   MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
4157 
4158   SDValue AllocSize = getValue(I.getArraySize());
4159 
4160   EVT IntPtr = TLI.getPointerTy(DL, I.getAddressSpace());
4161   if (AllocSize.getValueType() != IntPtr)
4162     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4163 
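       // The dynamic allocation size is ArraySize * sizeof(Ty); for scalable
       // types the per-element size is vscale * KnownMinSize.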
4164   if (TySize.isScalable())
4165     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4166                             DAG.getVScale(dl, IntPtr,
4167                                           APInt(IntPtr.getScalarSizeInBits(),
4168                                                 TySize.getKnownMinValue())));
4169   else {
4170     SDValue TySizeValue =
4171         DAG.getConstant(TySize.getFixedValue(), dl, MVT::getIntegerVT(64));
4172     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4173                             DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4174   }
4175 
4176   // Handle alignment.  If the requested alignment is less than or equal to
4177   // the stack alignment, ignore it.  If the requested alignment is greater
4178   // than the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
4179   Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4180   if (*Alignment <= StackAlign)
4181     Alignment = std::nullopt;
4182 
4183   const uint64_t StackAlignMask = StackAlign.value() - 1U;
4184   // Round the size of the allocation up to the stack alignment size
4185   // by adding SA-1 to the size. This doesn't overflow because we're computing
4186   // an address inside an alloca.
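       // That is, AllocSize = (AllocSize + StackAlignMask) & ~StackAlignMask.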
4187   SDNodeFlags Flags;
4188   Flags.setNoUnsignedWrap(true);
4189   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4190                           DAG.getConstant(StackAlignMask, dl, IntPtr), Flags);
4191 
4192   // Mask out the low bits for alignment purposes.
4193   AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4194                           DAG.getConstant(~StackAlignMask, dl, IntPtr));
4195 
4196   SDValue Ops[] = {
4197       getRoot(), AllocSize,
4198       DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4199   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4200   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4201   setValue(&I, DSA);
4202   DAG.setRoot(DSA.getValue(1));
4203 
4204   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4205 }
4206 
4207 static const MDNode *getRangeMetadata(const Instruction &I) {
4208   // If !noundef is not present, then !range violation results in a poison
4209   // value rather than immediate undefined behavior. In theory, transferring
4210   // these annotations to SDAG is fine, but in practice there are key SDAG
4211   // transforms that are known not to be poison-safe, such as folding logical
4212   // and/or to bitwise and/or. For now, only transfer !range if !noundef is
4213   // also present.
4214   if (!I.hasMetadata(LLVMContext::MD_noundef))
4215     return nullptr;
4216   return I.getMetadata(LLVMContext::MD_range);
4217 }
4218 
4219 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4220   if (I.isAtomic())
4221     return visitAtomicLoad(I);
4222 
4223   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4224   const Value *SV = I.getOperand(0);
4225   if (TLI.supportSwiftError()) {
4226     // Swifterror values can come from either a function parameter with
4227     // swifterror attribute or an alloca with swifterror attribute.
4228     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4229       if (Arg->hasSwiftErrorAttr())
4230         return visitLoadFromSwiftError(I);
4231     }
4232 
4233     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4234       if (Alloca->isSwiftError())
4235         return visitLoadFromSwiftError(I);
4236     }
4237   }
4238 
4239   SDValue Ptr = getValue(SV);
4240 
4241   Type *Ty = I.getType();
4242   SmallVector<EVT, 4> ValueVTs, MemVTs;
4243   SmallVector<TypeSize, 4> Offsets;
4244   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets, 0);
4245   unsigned NumValues = ValueVTs.size();
4246   if (NumValues == 0)
4247     return;
4248 
4249   Align Alignment = I.getAlign();
4250   AAMDNodes AAInfo = I.getAAMetadata();
4251   const MDNode *Ranges = getRangeMetadata(I);
4252   bool isVolatile = I.isVolatile();
4253   MachineMemOperand::Flags MMOFlags =
4254       TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4255 
4256   SDValue Root;
4257   bool ConstantMemory = false;
4258   if (isVolatile)
4259     // Serialize volatile loads with other side effects.
4260     Root = getRoot();
4261   else if (NumValues > MaxParallelChains)
4262     Root = getMemoryRoot();
4263   else if (AA &&
4264            AA->pointsToConstantMemory(MemoryLocation(
4265                SV,
4266                LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4267                AAInfo))) {
4268     // Do not serialize (non-volatile) loads of constant memory with anything.
4269     Root = DAG.getEntryNode();
4270     ConstantMemory = true;
4271     MMOFlags |= MachineMemOperand::MOInvariant;
4272   } else {
4273     // Do not serialize non-volatile loads against each other.
4274     Root = DAG.getRoot();
4275   }
4276 
4277   SDLoc dl = getCurSDLoc();
4278 
4279   if (isVolatile)
4280     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4281 
4282   SmallVector<SDValue, 4> Values(NumValues);
4283   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4284 
4285   unsigned ChainI = 0;
4286   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4287     // Serializing loads here may result in excessive register pressure, and
4288     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4289     // could recover a bit by hoisting nodes upward in the chain by recognizing
4290     // they are side-effect free or do not alias. The optimizer should really
4291     // avoid this case by converting large object/array copies to llvm.memcpy
4292     // (MaxParallelChains should always remain as a failsafe).
4293     if (ChainI == MaxParallelChains) {
4294       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4295       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4296                                   ArrayRef(Chains.data(), ChainI));
4297       Root = Chain;
4298       ChainI = 0;
4299     }
4300 
4301     // TODO: MachinePointerInfo only supports a fixed length offset.
4302     MachinePointerInfo PtrInfo =
4303         !Offsets[i].isScalable() || Offsets[i].isZero()
4304             ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4305             : MachinePointerInfo();
4306 
4307     SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4308     SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment,
4309                             MMOFlags, AAInfo, Ranges);
4310     Chains[ChainI] = L.getValue(1);
4311 
4312     if (MemVTs[i] != ValueVTs[i])
4313       L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4314 
4315     Values[i] = L;
4316   }
4317 
4318   if (!ConstantMemory) {
4319     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4320                                 ArrayRef(Chains.data(), ChainI));
4321     if (isVolatile)
4322       DAG.setRoot(Chain);
4323     else
4324       PendingLoads.push_back(Chain);
4325   }
4326 
4327   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4328                            DAG.getVTList(ValueVTs), Values));
4329 }
4330 
4331 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4332   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4333          "call visitStoreToSwiftError when backend supports swifterror");
4334 
4335   SmallVector<EVT, 4> ValueVTs;
4336   SmallVector<uint64_t, 4> Offsets;
4337   const Value *SrcV = I.getOperand(0);
4338   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4339                   SrcV->getType(), ValueVTs, &Offsets, 0);
4340   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4341          "expect a single EVT for swifterror");
4342 
4343   SDValue Src = getValue(SrcV);
4344   // Create a virtual register, then update the virtual register.
4345   Register VReg =
4346       SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4347   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4348   // Chain can be getRoot or getControlRoot.
4349   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4350                                       SDValue(Src.getNode(), Src.getResNo()));
4351   DAG.setRoot(CopyNode);
4352 }
4353 
4354 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4355   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4356          "call visitLoadFromSwiftError when backend supports swifterror");
4357 
4358   assert(!I.isVolatile() &&
4359          !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4360          !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4361          "Volatile, non-temporal and invariant loads are not supported for load_from_swift_error");
4362 
4363   const Value *SV = I.getOperand(0);
4364   Type *Ty = I.getType();
4365   assert(
4366       (!AA ||
4367        !AA->pointsToConstantMemory(MemoryLocation(
4368            SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4369            I.getAAMetadata()))) &&
4370       "load_from_swift_error should not be constant memory");
4371 
4372   SmallVector<EVT, 4> ValueVTs;
4373   SmallVector<uint64_t, 4> Offsets;
4374   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4375                   ValueVTs, &Offsets, 0);
4376   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4377          "expect a single EVT for swifterror");
4378 
4379   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4380   SDValue L = DAG.getCopyFromReg(
4381       getRoot(), getCurSDLoc(),
4382       SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4383 
4384   setValue(&I, L);
4385 }
4386 
4387 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4388   if (I.isAtomic())
4389     return visitAtomicStore(I);
4390 
4391   const Value *SrcV = I.getOperand(0);
4392   const Value *PtrV = I.getOperand(1);
4393 
4394   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4395   if (TLI.supportSwiftError()) {
4396     // Swifterror values can come from either a function parameter with
4397     // swifterror attribute or an alloca with swifterror attribute.
4398     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4399       if (Arg->hasSwiftErrorAttr())
4400         return visitStoreToSwiftError(I);
4401     }
4402 
4403     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4404       if (Alloca->isSwiftError())
4405         return visitStoreToSwiftError(I);
4406     }
4407   }
4408 
4409   SmallVector<EVT, 4> ValueVTs, MemVTs;
4410   SmallVector<TypeSize, 4> Offsets;
4411   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4412                   SrcV->getType(), ValueVTs, &MemVTs, &Offsets, 0);
4413   unsigned NumValues = ValueVTs.size();
4414   if (NumValues == 0)
4415     return;
4416 
4417   // Get the lowered operands. Note that we do this after
4418   // checking if NumValues is zero, because with zero results
4419   // the operands won't have values in the map.
4420   SDValue Src = getValue(SrcV);
4421   SDValue Ptr = getValue(PtrV);
4422 
4423   SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4424   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4425   SDLoc dl = getCurSDLoc();
4426   Align Alignment = I.getAlign();
4427   AAMDNodes AAInfo = I.getAAMetadata();
4428 
4429   auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4430 
4431   unsigned ChainI = 0;
4432   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4433     // See visitLoad comments.
4434     if (ChainI == MaxParallelChains) {
4435       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4436                                   ArrayRef(Chains.data(), ChainI));
4437       Root = Chain;
4438       ChainI = 0;
4439     }
4440 
4441     // TODO: MachinePointerInfo only supports a fixed length offset.
4442     MachinePointerInfo PtrInfo =
4443         !Offsets[i].isScalable() || Offsets[i].isZero()
4444             ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4445             : MachinePointerInfo();
4446 
4447     SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4448     SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4449     if (MemVTs[i] != ValueVTs[i])
4450       Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4451     SDValue St =
4452         DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4453     Chains[ChainI] = St;
4454   }
4455 
4456   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4457                                   ArrayRef(Chains.data(), ChainI));
4458   setValue(&I, StoreNode);
4459   DAG.setRoot(StoreNode);
4460 }
4461 
4462 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4463                                            bool IsCompressing) {
4464   SDLoc sdl = getCurSDLoc();
4465 
4466   auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4467                                MaybeAlign &Alignment) {
4468     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4469     Src0 = I.getArgOperand(0);
4470     Ptr = I.getArgOperand(1);
4471     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue();
4472     Mask = I.getArgOperand(3);
4473   };
4474   auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4475                                     MaybeAlign &Alignment) {
4476     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4477     Src0 = I.getArgOperand(0);
4478     Ptr = I.getArgOperand(1);
4479     Mask = I.getArgOperand(2);
4480     Alignment = std::nullopt;
4481   };
4482 
4483   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4484   MaybeAlign Alignment;
4485   if (IsCompressing)
4486     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4487   else
4488     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4489 
4490   SDValue Ptr = getValue(PtrOperand);
4491   SDValue Src0 = getValue(Src0Operand);
4492   SDValue Mask = getValue(MaskOperand);
4493   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
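       // The offset operand is undef because this is an unindexed masked store.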
4494 
4495   EVT VT = Src0.getValueType();
4496   if (!Alignment)
4497     Alignment = DAG.getEVTAlign(VT);
4498 
4499   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4500       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
4501       MemoryLocation::UnknownSize, *Alignment, I.getAAMetadata());
4502   SDValue StoreNode =
4503       DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4504                          ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4505   DAG.setRoot(StoreNode);
4506   setValue(&I, StoreNode);
4507 }
4508 
4509 // Get a uniform base for the Gather/Scatter intrinsic.
4510 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4511 // We try to represent it as a base pointer + vector of indices.
4512 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4513 // The first operand of the GEP may be a single pointer or a vector of pointers
4514 // Example:
4515 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4516 //  or
4517 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
4518 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4519 //
4520 // When the first GEP operand is a single pointer, it is the uniform base we
4521 // are looking for. If the first operand of the GEP is a splat vector, we
4522 // extract the splat value and use it as a uniform base.
4523 // In all other cases the function returns 'false'.
4524 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4525                            ISD::MemIndexType &IndexType, SDValue &Scale,
4526                            SelectionDAGBuilder *SDB, const BasicBlock *CurBB,
4527                            uint64_t ElemSize) {
4528   SelectionDAG& DAG = SDB->DAG;
4529   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4530   const DataLayout &DL = DAG.getDataLayout();
4531 
4532   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4533 
4534   // Handle splat constant pointer.
4535   if (auto *C = dyn_cast<Constant>(Ptr)) {
4536     C = C->getSplatValue();
4537     if (!C)
4538       return false;
4539 
4540     Base = SDB->getValue(C);
4541 
4542     ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4543     EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4544     Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4545     IndexType = ISD::SIGNED_SCALED;
4546     Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4547     return true;
4548   }
4549 
4550   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4551   if (!GEP || GEP->getParent() != CurBB)
4552     return false;
4553 
4554   if (GEP->getNumOperands() != 2)
4555     return false;
4556 
4557   const Value *BasePtr = GEP->getPointerOperand();
4558   const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4559 
4560   // Make sure the base is scalar and the index is a vector.
4561   if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4562     return false;
4563 
4564   TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
4565   if (ScaleVal.isScalable())
4566     return false;
4567 
4568   // Target may not support the required addressing mode.
4569   if (ScaleVal != 1 &&
4570       !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize))
4571     return false;
4572 
4573   Base = SDB->getValue(BasePtr);
4574   Index = SDB->getValue(IndexVal);
4575   IndexType = ISD::SIGNED_SCALED;
4576 
4577   Scale =
4578       DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4579   return true;
4580 }
4581 
4582 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4583   SDLoc sdl = getCurSDLoc();
4584 
4585   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4586   const Value *Ptr = I.getArgOperand(1);
4587   SDValue Src0 = getValue(I.getArgOperand(0));
4588   SDValue Mask = getValue(I.getArgOperand(3));
4589   EVT VT = Src0.getValueType();
4590   Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4591                         ->getMaybeAlignValue()
4592                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4593   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4594 
4595   SDValue Base;
4596   SDValue Index;
4597   ISD::MemIndexType IndexType;
4598   SDValue Scale;
4599   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4600                                     I.getParent(), VT.getScalarStoreSize());
4601 
4602   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4603   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4604       MachinePointerInfo(AS), MachineMemOperand::MOStore,
4605       // TODO: Make MachineMemOperands aware of scalable
4606       // vectors.
4607       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata());
4608   if (!UniformBase) {
4609     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4610     Index = getValue(Ptr);
4611     IndexType = ISD::SIGNED_SCALED;
4612     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4613   }
4614 
4615   EVT IdxVT = Index.getValueType();
4616   EVT EltTy = IdxVT.getVectorElementType();
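       // The target may prefer a wider index element type; shouldExtendGSIndex
       // updates EltTy accordingly and the index is sign-extended below.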
4617   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4618     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4619     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4620   }
4621 
4622   SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4623   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4624                                          Ops, MMO, IndexType, false);
4625   DAG.setRoot(Scatter);
4626   setValue(&I, Scatter);
4627 }
4628 
4629 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4630   SDLoc sdl = getCurSDLoc();
4631 
4632   auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4633                               MaybeAlign &Alignment) {
4634     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4635     Ptr = I.getArgOperand(0);
4636     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4637     Mask = I.getArgOperand(2);
4638     Src0 = I.getArgOperand(3);
4639   };
4640   auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4641                                  MaybeAlign &Alignment) {
4642     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4643     Ptr = I.getArgOperand(0);
4644     Alignment = std::nullopt;
4645     Mask = I.getArgOperand(1);
4646     Src0 = I.getArgOperand(2);
4647   };
4648 
4649   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4650   MaybeAlign Alignment;
4651   if (IsExpanding)
4652     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4653   else
4654     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4655 
4656   SDValue Ptr = getValue(PtrOperand);
4657   SDValue Src0 = getValue(Src0Operand);
4658   SDValue Mask = getValue(MaskOperand);
4659   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4660 
4661   EVT VT = Src0.getValueType();
4662   if (!Alignment)
4663     Alignment = DAG.getEVTAlign(VT);
4664 
4665   AAMDNodes AAInfo = I.getAAMetadata();
4666   const MDNode *Ranges = getRangeMetadata(I);
4667 
4668   // Do not serialize masked loads of constant memory with anything.
4669   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
4670   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4671 
4672   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4673 
4674   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4675       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
4676       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
4677 
4678   SDValue Load =
4679       DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4680                         ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4681   if (AddToChain)
4682     PendingLoads.push_back(Load.getValue(1));
4683   setValue(&I, Load);
4684 }
4685 
4686 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4687   SDLoc sdl = getCurSDLoc();
4688 
4689   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4690   const Value *Ptr = I.getArgOperand(0);
4691   SDValue Src0 = getValue(I.getArgOperand(3));
4692   SDValue Mask = getValue(I.getArgOperand(2));
4693 
4694   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4695   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4696   Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
4697                         ->getMaybeAlignValue()
4698                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4699 
4700   const MDNode *Ranges = getRangeMetadata(I);
4701 
4702   SDValue Root = DAG.getRoot();
4703   SDValue Base;
4704   SDValue Index;
4705   ISD::MemIndexType IndexType;
4706   SDValue Scale;
4707   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4708                                     I.getParent(), VT.getScalarStoreSize());
4709   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4710   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4711       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
4712       // TODO: Make MachineMemOperands aware of scalable
4713       // vectors.
4714       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);
4715 
4716   if (!UniformBase) {
4717     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4718     Index = getValue(Ptr);
4719     IndexType = ISD::SIGNED_SCALED;
4720     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4721   }
4722 
4723   EVT IdxVT = Index.getValueType();
4724   EVT EltTy = IdxVT.getVectorElementType();
4725   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4726     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4727     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4728   }
4729 
4730   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4731   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4732                                        Ops, MMO, IndexType, ISD::NON_EXTLOAD);
4733 
4734   PendingLoads.push_back(Gather.getValue(1));
4735   setValue(&I, Gather);
4736 }
4737 
4738 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4739   SDLoc dl = getCurSDLoc();
4740   AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4741   AtomicOrdering FailureOrdering = I.getFailureOrdering();
4742   SyncScope::ID SSID = I.getSyncScopeID();
4743 
4744   SDValue InChain = getRoot();
4745 
4746   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4747   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
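       // Results are the loaded value (MemVT), an i1 success flag and the chain.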
4748 
4749   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4750   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4751 
4752   MachineFunction &MF = DAG.getMachineFunction();
4753   MachineMemOperand *MMO = MF.getMachineMemOperand(
4754       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4755       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
4756       FailureOrdering);
4757 
4758   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4759                                    dl, MemVT, VTs, InChain,
4760                                    getValue(I.getPointerOperand()),
4761                                    getValue(I.getCompareOperand()),
4762                                    getValue(I.getNewValOperand()), MMO);
4763 
4764   SDValue OutChain = L.getValue(2);
4765 
4766   setValue(&I, L);
4767   DAG.setRoot(OutChain);
4768 }
4769 
4770 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4771   SDLoc dl = getCurSDLoc();
4772   ISD::NodeType NT;
4773   switch (I.getOperation()) {
4774   default: llvm_unreachable("Unknown atomicrmw operation");
4775   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4776   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
4777   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
4778   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
4779   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4780   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
4781   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
4782   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
4783   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
4784   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4785   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4786   case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4787   case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4788   case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break;
4789   case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break;
4790   case AtomicRMWInst::UIncWrap:
4791     NT = ISD::ATOMIC_LOAD_UINC_WRAP;
4792     break;
4793   case AtomicRMWInst::UDecWrap:
4794     NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
4795     break;
4796   }
4797   AtomicOrdering Ordering = I.getOrdering();
4798   SyncScope::ID SSID = I.getSyncScopeID();
4799 
4800   SDValue InChain = getRoot();
4801 
4802   auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4803   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4804   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4805 
4806   MachineFunction &MF = DAG.getMachineFunction();
4807   MachineMemOperand *MMO = MF.getMachineMemOperand(
4808       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4809       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
4810 
4811   SDValue L =
4812     DAG.getAtomic(NT, dl, MemVT, InChain,
4813                   getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4814                   MMO);
4815 
4816   SDValue OutChain = L.getValue(1);
4817 
4818   setValue(&I, L);
4819   DAG.setRoot(OutChain);
4820 }
4821 
4822 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4823   SDLoc dl = getCurSDLoc();
4824   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4825   SDValue Ops[3];
4826   Ops[0] = getRoot();
4827   Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4828                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4829   Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4830                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4831   SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
4832   setValue(&I, N);
4833   DAG.setRoot(N);
4834 }
4835 
4836 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4837   SDLoc dl = getCurSDLoc();
4838   AtomicOrdering Order = I.getOrdering();
4839   SyncScope::ID SSID = I.getSyncScopeID();
4840 
4841   SDValue InChain = getRoot();
4842 
4843   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4844   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4845   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4846 
4847   if (!TLI.supportsUnalignedAtomics() &&
4848       I.getAlign().value() < MemVT.getSizeInBits() / 8)
4849     report_fatal_error("Cannot generate unaligned atomic load");
4850 
4851   auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4852 
4853   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4854       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4855       I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
4856 
4857   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4858 
4859   SDValue Ptr = getValue(I.getPointerOperand());
4860 
4861   if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
4862     // TODO: Once this is better exercised by tests, it should be merged with
4863     // the normal path for loads to prevent future divergence.
4864     SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
4865     if (MemVT != VT)
4866       L = DAG.getPtrExtOrTrunc(L, dl, VT);
4867 
4868     setValue(&I, L);
4869     SDValue OutChain = L.getValue(1);
4870     if (!I.isUnordered())
4871       DAG.setRoot(OutChain);
4872     else
4873       PendingLoads.push_back(OutChain);
4874     return;
4875   }
4876 
4877   SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4878                             Ptr, MMO);
4879 
4880   SDValue OutChain = L.getValue(1);
4881   if (MemVT != VT)
4882     L = DAG.getPtrExtOrTrunc(L, dl, VT);
4883 
4884   setValue(&I, L);
4885   DAG.setRoot(OutChain);
4886 }
4887 
4888 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4889   SDLoc dl = getCurSDLoc();
4890 
4891   AtomicOrdering Ordering = I.getOrdering();
4892   SyncScope::ID SSID = I.getSyncScopeID();
4893 
4894   SDValue InChain = getRoot();
4895 
4896   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4897   EVT MemVT =
4898       TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4899 
4900   if (!TLI.supportsUnalignedAtomics() &&
4901       I.getAlign().value() < MemVT.getSizeInBits() / 8)
4902     report_fatal_error("Cannot generate unaligned atomic store");
4903 
4904   auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4905 
4906   MachineFunction &MF = DAG.getMachineFunction();
4907   MachineMemOperand *MMO = MF.getMachineMemOperand(
4908       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4909       I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
4910 
4911   SDValue Val = getValue(I.getValueOperand());
4912   if (Val.getValueType() != MemVT)
4913     Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4914   SDValue Ptr = getValue(I.getPointerOperand());
4915 
4916   if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
4917     // TODO: Once this is better exercised by tests, it should be merged with
4918     // the normal path for stores to prevent future divergence.
4919     SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
4920     setValue(&I, S);
4921     DAG.setRoot(S);
4922     return;
4923   }
4924   SDValue OutChain =
4925       DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
4926 
4927   setValue(&I, OutChain);
4928   DAG.setRoot(OutChain);
4929 }
4930 
4931 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4932 /// node.
4933 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4934                                                unsigned Intrinsic) {
4935   // Ignore the callsite's attributes. A specific call site may be marked with
4936   // readnone, but the lowering code will expect the chain based on the
4937   // definition.
4938   const Function *F = I.getCalledFunction();
4939   bool HasChain = !F->doesNotAccessMemory();
4940   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4941 
4942   // Build the operand list.
4943   SmallVector<SDValue, 8> Ops;
4944   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4945     if (OnlyLoad) {
4946       // We don't need to serialize loads against other loads.
4947       Ops.push_back(DAG.getRoot());
4948     } else {
4949       Ops.push_back(getRoot());
4950     }
4951   }
4952 
4953   // Info is set by getTgtMemIntrinsic
4954   TargetLowering::IntrinsicInfo Info;
4955   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4956   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4957                                                DAG.getMachineFunction(),
4958                                                Intrinsic);
4959 
4960   // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4961   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4962       Info.opc == ISD::INTRINSIC_W_CHAIN)
4963     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4964                                         TLI.getPointerTy(DAG.getDataLayout())));
4965 
4966   // Add all operands of the call to the operand list.
4967   for (unsigned i = 0, e = I.arg_size(); i != e; ++i) {
4968     const Value *Arg = I.getArgOperand(i);
4969     if (!I.paramHasAttr(i, Attribute::ImmArg)) {
4970       Ops.push_back(getValue(Arg));
4971       continue;
4972     }
4973 
4974     // Use TargetConstant instead of a regular constant for immarg.
4975     EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
4976     if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
4977       assert(CI->getBitWidth() <= 64 &&
4978              "large intrinsic immediates not handled");
4979       Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
4980     } else {
4981       Ops.push_back(
4982           DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
4983     }
4984   }
4985 
4986   SmallVector<EVT, 4> ValueVTs;
4987   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4988 
4989   if (HasChain)
4990     ValueVTs.push_back(MVT::Other);
4991 
4992   SDVTList VTs = DAG.getVTList(ValueVTs);
4993 
4994   // Propagate fast-math-flags from IR to node(s).
4995   SDNodeFlags Flags;
4996   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
4997     Flags.copyFMF(*FPMO);
4998   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
4999 
5000   // Create the node.
5001   SDValue Result;
5002   // In some cases, custom collection of operands from CallInst I may be needed.
5003   TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
5004   if (IsTgtIntrinsic) {
5005     // This is a target intrinsic that touches memory.
5006     //
5007     // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
5008     //       didn't yield anything useful.
5009     MachinePointerInfo MPI;
5010     if (Info.ptrVal)
5011       MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
5012     else if (Info.fallbackAddressSpace)
5013       MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
5014     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops,
5015                                      Info.memVT, MPI, Info.align, Info.flags,
5016                                      Info.size, I.getAAMetadata());
5017   } else if (!HasChain) {
5018     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
5019   } else if (!I.getType()->isVoidTy()) {
5020     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
5021   } else {
5022     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
5023   }
5024 
5025   if (HasChain) {
5026     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
5027     if (OnlyLoad)
5028       PendingLoads.push_back(Chain);
5029     else
5030       DAG.setRoot(Chain);
5031   }
5032 
5033   if (!I.getType()->isVoidTy()) {
5034     if (!isa<VectorType>(I.getType()))
5035       Result = lowerRangeToAssertZExt(DAG, I, Result);
5036 
5037     MaybeAlign Alignment = I.getRetAlign();
5038 
5039     // Insert `assertalign` node if there's an alignment.
5040     if (InsertAssertAlign && Alignment) {
5041       Result =
5042           DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
5043     }
5044 
5045     setValue(&I, Result);
5046   }
5047 }
5048 
5049 /// GetSignificand - Get the significand and build it into a floating-point
5050 /// number with an exponent of 1:
5051 ///
5052 ///   Op = (Op & 0x007fffff) | 0x3f800000;
5053 ///
5054 /// where Op is the i32 bit pattern of the floating-point value.
5055 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
5056   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5057                            DAG.getConstant(0x007fffff, dl, MVT::i32));
5058   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
5059                            DAG.getConstant(0x3f800000, dl, MVT::i32));
5060   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
5061 }
5062 
5063 /// GetExponent - Get the exponent:
5064 ///
5065 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
5066 ///
5067 /// where Op is the i32 bit pattern of the floating-point value.
5068 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
5069                            const TargetLowering &TLI, const SDLoc &dl) {
5070   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5071                            DAG.getConstant(0x7f800000, dl, MVT::i32));
5072   SDValue t1 = DAG.getNode(
5073       ISD::SRL, dl, MVT::i32, t0,
5074       DAG.getConstant(23, dl,
5075                       TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
5076   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
5077                            DAG.getConstant(127, dl, MVT::i32));
5078   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
5079 }
5080 
5081 /// getF32Constant - Get 32-bit floating point constant.
5082 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
5083                               const SDLoc &dl) {
5084   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
5085                            MVT::f32);
5086 }
5087 
5088 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
5089                                        SelectionDAG &DAG) {
5090   // TODO: What fast-math-flags should be set on the floating-point nodes?
5091 
5092   //   IntegerPartOfX = (int32_t)t0;
5093   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
5094 
5095   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
5096   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
5097   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
5098 
5099   //   IntegerPartOfX <<= 23;
5100   IntegerPartOfX =
5101       DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
5102                   DAG.getConstant(23, dl,
5103                                   DAG.getTargetLoweringInfo().getShiftAmountTy(
5104                                       MVT::i32, DAG.getDataLayout())));
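  // Adding IntegerPartOfX, pre-shifted into the exponent field, to the bit
  // pattern of 2^FractionalPartOfX computed below scales that value by
  // 2^IntegerPartOfX (assuming the result stays within the normal f32 range).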
5105 
5106   SDValue TwoToFractionalPartOfX;
5107   if (LimitFloatPrecision <= 6) {
5108     // For floating-point precision of 6:
5109     //
5110     //   TwoToFractionalPartOfX =
5111     //     0.997535578f +
5112     //       (0.735607626f + 0.252464424f * x) * x;
5113     //
5114     // error 0.0144103317, which is 6 bits
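    // The hex constants below are the IEEE-754 single-precision encodings of
    // the coefficients above; the polynomial is evaluated in Horner form.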
5115     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5116                              getF32Constant(DAG, 0x3e814304, dl));
5117     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5118                              getF32Constant(DAG, 0x3f3c50c8, dl));
5119     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5120     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5121                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
5122   } else if (LimitFloatPrecision <= 12) {
5123     // For floating-point precision of 12:
5124     //
5125     //   TwoToFractionalPartOfX =
5126     //     0.999892986f +
5127     //       (0.696457318f +
5128     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
5129     //
5130     // error 0.000107046256, which is 13 to 14 bits
5131     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5132                              getF32Constant(DAG, 0x3da235e3, dl));
5133     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5134                              getF32Constant(DAG, 0x3e65b8f3, dl));
5135     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5136     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5137                              getF32Constant(DAG, 0x3f324b07, dl));
5138     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5139     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5140                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
5141   } else { // LimitFloatPrecision <= 18
5142     // For floating-point precision of 18:
5143     //
5144     //   TwoToFractionalPartOfX =
5145     //     0.999999982f +
5146     //       (0.693148872f +
5147     //         (0.240227044f +
5148     //           (0.554906021e-1f +
5149     //             (0.961591928e-2f +
5150     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5151     // error 2.47208000*10^(-7), which is better than 18 bits
5152     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5153                              getF32Constant(DAG, 0x3924b03e, dl));
5154     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5155                              getF32Constant(DAG, 0x3ab24b87, dl));
5156     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5157     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5158                              getF32Constant(DAG, 0x3c1d8c17, dl));
5159     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5160     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5161                              getF32Constant(DAG, 0x3d634a1d, dl));
5162     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5163     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5164                              getF32Constant(DAG, 0x3e75fe14, dl));
5165     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5166     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5167                               getF32Constant(DAG, 0x3f317234, dl));
5168     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5169     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
5170                                          getF32Constant(DAG, 0x3f800000, dl));
5171   }
5172 
5173   // Add the exponent into the result in integer domain.
5174   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5175   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5176                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5177 }
5178 
5179 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
5180 /// limited-precision mode.
5181 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5182                          const TargetLowering &TLI, SDNodeFlags Flags) {
5183   if (Op.getValueType() == MVT::f32 &&
5184       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5185 
5186     // Put the exponent in the right bit position for later addition to the
5187     // final result:
5188     //
5189     // t0 = Op * log2(e)
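    //
    // Since exp(Op) == exp2(Op * log2(e)), the exp2 expansion below can be
    // reused directly.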
5190 
5191     // TODO: What fast-math-flags should be set here?
5192     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5193                              DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5194     return getLimitedPrecisionExp2(t0, dl, DAG);
5195   }
5196 
5197   // No special expansion.
5198   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
5199 }
5200 
5201 /// expandLog - Lower a log intrinsic. Handles the special sequences for
5202 /// limited-precision mode.
5203 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5204                          const TargetLowering &TLI, SDNodeFlags Flags) {
5205   // TODO: What fast-math-flags should be set on the floating-point nodes?
5206 
5207   if (Op.getValueType() == MVT::f32 &&
5208       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5209     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5210 
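    // With Op = 2^Exp * Significand and Significand in [1,2), we have
    // ln(Op) = Exp * ln(2) + ln(Significand); the second term is approximated
    // by a small polynomial below.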
5211     // Scale the exponent by log(2).
5212     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5213     SDValue LogOfExponent =
5214         DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5215                     DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
5216 
5217     // Get the significand and build it into a floating-point number with
5218     // exponent of 1.
5219     SDValue X = GetSignificand(DAG, Op1, dl);
5220 
5221     SDValue LogOfMantissa;
5222     if (LimitFloatPrecision <= 6) {
5223       // For floating-point precision of 6:
5224       //
5225       //     LogOfMantissa =
5226       //     -1.1609546f +
5227       //       (1.4034025f - 0.23903021f * x) * x;
5228       //
5229       // error 0.0034276066, which is better than 8 bits
5230       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5231                                getF32Constant(DAG, 0xbe74c456, dl));
5232       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5233                                getF32Constant(DAG, 0x3fb3a2b1, dl));
5234       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5235       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5236                                   getF32Constant(DAG, 0x3f949a29, dl));
5237     } else if (LimitFloatPrecision <= 12) {
5238       // For floating-point precision of 12:
5239       //
5240       //   LogOfMantissa =
5241       //     -1.7417939f +
5242       //       (2.8212026f +
5243       //         (-1.4699568f +
5244       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5245       //
5246       // error 0.000061011436, which is 14 bits
5247       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5248                                getF32Constant(DAG, 0xbd67b6d6, dl));
5249       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5250                                getF32Constant(DAG, 0x3ee4f4b8, dl));
5251       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5252       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5253                                getF32Constant(DAG, 0x3fbc278b, dl));
5254       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5255       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5256                                getF32Constant(DAG, 0x40348e95, dl));
5257       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5258       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5259                                   getF32Constant(DAG, 0x3fdef31a, dl));
5260     } else { // LimitFloatPrecision <= 18
5261       // For floating-point precision of 18:
5262       //
5263       //   LogOfMantissa =
5264       //     -2.1072184f +
5265       //       (4.2372794f +
5266       //         (-3.7029485f +
5267       //           (2.2781945f +
5268       //             (-0.87823314f +
5269       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5270       //
5271       // error 0.0000023660568, which is better than 18 bits
5272       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5273                                getF32Constant(DAG, 0xbc91e5ac, dl));
5274       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5275                                getF32Constant(DAG, 0x3e4350aa, dl));
5276       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5277       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5278                                getF32Constant(DAG, 0x3f60d3e3, dl));
5279       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5280       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5281                                getF32Constant(DAG, 0x4011cdf0, dl));
5282       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5283       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5284                                getF32Constant(DAG, 0x406cfd1c, dl));
5285       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5286       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5287                                getF32Constant(DAG, 0x408797cb, dl));
5288       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5289       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5290                                   getF32Constant(DAG, 0x4006dcab, dl));
5291     }
5292 
5293     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5294   }
5295 
5296   // No special expansion.
5297   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
5298 }
5299 
5300 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5301 /// limited-precision mode.
5302 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5303                           const TargetLowering &TLI, SDNodeFlags Flags) {
5304   // TODO: What fast-math-flags should be set on the floating-point nodes?
5305 
5306   if (Op.getValueType() == MVT::f32 &&
5307       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5308     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5309 
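    // Here log2(Op) = Exp + log2(Significand), with Significand in [1,2).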
5310     // Get the exponent.
5311     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5312 
5313     // Get the significand and build it into a floating-point number with
5314     // exponent of 1.
5315     SDValue X = GetSignificand(DAG, Op1, dl);
5316 
5317     // Different possible minimax polynomial approximations of log2 of the
5318     // significand, for various degrees of accuracy over [1,2].
5319     SDValue Log2ofMantissa;
5320     if (LimitFloatPrecision <= 6) {
5321       // For floating-point precision of 6:
5322       //
5323       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5324       //
5325       // error 0.0049451742, which is better than 7 bits
5326       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5327                                getF32Constant(DAG, 0xbeb08fe0, dl));
5328       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5329                                getF32Constant(DAG, 0x40019463, dl));
5330       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5331       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5332                                    getF32Constant(DAG, 0x3fd6633d, dl));
5333     } else if (LimitFloatPrecision <= 12) {
5334       // For floating-point precision of 12:
5335       //
5336       //   Log2ofMantissa =
5337       //     -2.51285454f +
5338       //       (4.07009056f +
5339       //         (-2.12067489f +
5340       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5341       //
5342       // error 0.0000876136000, which is better than 13 bits
5343       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5344                                getF32Constant(DAG, 0xbda7262e, dl));
5345       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5346                                getF32Constant(DAG, 0x3f25280b, dl));
5347       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5348       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5349                                getF32Constant(DAG, 0x4007b923, dl));
5350       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5351       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5352                                getF32Constant(DAG, 0x40823e2f, dl));
5353       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5354       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5355                                    getF32Constant(DAG, 0x4020d29c, dl));
5356     } else { // LimitFloatPrecision <= 18
5357       // For floating-point precision of 18:
5358       //
5359       //   Log2ofMantissa =
5360       //     -3.0400495f +
5361       //       (6.1129976f +
5362       //         (-5.3420409f +
5363       //           (3.2865683f +
5364       //             (-1.2669343f +
5365       //               (0.27515199f -
5366       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5367       //
5368       // error 0.0000018516, which is better than 18 bits
5369       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5370                                getF32Constant(DAG, 0xbcd2769e, dl));
5371       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5372                                getF32Constant(DAG, 0x3e8ce0b9, dl));
5373       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5374       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5375                                getF32Constant(DAG, 0x3fa22ae7, dl));
5376       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5377       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5378                                getF32Constant(DAG, 0x40525723, dl));
5379       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5380       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5381                                getF32Constant(DAG, 0x40aaf200, dl));
5382       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5383       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5384                                getF32Constant(DAG, 0x40c39dad, dl));
5385       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5386       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5387                                    getF32Constant(DAG, 0x4042902c, dl));
5388     }
5389 
5390     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5391   }
5392 
5393   // No special expansion.
5394   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5395 }
5396 
5397 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5398 /// limited-precision mode.
5399 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5400                            const TargetLowering &TLI, SDNodeFlags Flags) {
5401   // TODO: What fast-math-flags should be set on the floating-point nodes?
5402 
5403   if (Op.getValueType() == MVT::f32 &&
5404       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5405     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5406 
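    // Here log10(Op) = Exp * log10(2) + log10(Significand), with Significand
    // in [1,2).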
5407     // Scale the exponent by log10(2) [0.30102999f].
5408     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5409     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5410                                         getF32Constant(DAG, 0x3e9a209a, dl));
5411 
5412     // Get the significand and build it into a floating-point number with
5413     // exponent of 1.
5414     SDValue X = GetSignificand(DAG, Op1, dl);
5415 
5416     SDValue Log10ofMantissa;
5417     if (LimitFloatPrecision <= 6) {
5418       // For floating-point precision of 6:
5419       //
5420       //   Log10ofMantissa =
5421       //     -0.50419619f +
5422       //       (0.60948995f - 0.10380950f * x) * x;
5423       //
5424       // error 0.0014886165, which is 6 bits
5425       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5426                                getF32Constant(DAG, 0xbdd49a13, dl));
5427       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5428                                getF32Constant(DAG, 0x3f1c0789, dl));
5429       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5430       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5431                                     getF32Constant(DAG, 0x3f011300, dl));
5432     } else if (LimitFloatPrecision <= 12) {
5433       // For floating-point precision of 12:
5434       //
5435       //   Log10ofMantissa =
5436       //     -0.64831180f +
5437       //       (0.91751397f +
5438       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5439       //
5440       // error 0.00019228036, which is better than 12 bits
5441       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5442                                getF32Constant(DAG, 0x3d431f31, dl));
5443       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5444                                getF32Constant(DAG, 0x3ea21fb2, dl));
5445       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5446       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5447                                getF32Constant(DAG, 0x3f6ae232, dl));
5448       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5449       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5450                                     getF32Constant(DAG, 0x3f25f7c3, dl));
5451     } else { // LimitFloatPrecision <= 18
5452       // For floating-point precision of 18:
5453       //
5454       //   Log10ofMantissa =
5455       //     -0.84299375f +
5456       //       (1.5327582f +
5457       //         (-1.0688956f +
5458       //           (0.49102474f +
5459       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5460       //
5461       // error 0.0000037995730, which is better than 18 bits
5462       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5463                                getF32Constant(DAG, 0x3c5d51ce, dl));
5464       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5465                                getF32Constant(DAG, 0x3e00685a, dl));
5466       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5467       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5468                                getF32Constant(DAG, 0x3efb6798, dl));
5469       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5470       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5471                                getF32Constant(DAG, 0x3f88d192, dl));
5472       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5473       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5474                                getF32Constant(DAG, 0x3fc4316c, dl));
5475       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5476       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5477                                     getF32Constant(DAG, 0x3f57ce70, dl));
5478     }
5479 
5480     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5481   }
5482 
5483   // No special expansion.
5484   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5485 }
5486 
5487 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5488 /// limited-precision mode.
5489 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5490                           const TargetLowering &TLI, SDNodeFlags Flags) {
5491   if (Op.getValueType() == MVT::f32 &&
5492       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5493     return getLimitedPrecisionExp2(Op, dl, DAG);
5494 
5495   // No special expansion.
5496   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5497 }
5498 
5499 /// expandPow - Lower a pow intrinsic. Handles the special sequence for
5500 /// limited-precision mode when the base is exactly 10.0f.
5501 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5502                          SelectionDAG &DAG, const TargetLowering &TLI,
5503                          SDNodeFlags Flags) {
5504   bool IsExp10 = false;
5505   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5506       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5507     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5508       APFloat Ten(10.0f);
5509       IsExp10 = LHSC->isExactlyValue(Ten);
5510     }
5511   }
5512 
5513   // TODO: What fast-math-flags should be set on the FMUL node?
5514   if (IsExp10) {
5515     // Put the exponent in the right bit position for later addition to the
5516     // final result:
5517     //
5518     //   #define LOG2OF10 3.3219281f
5519     //   t0 = Op * LOG2OF10;
5520     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5521                              getF32Constant(DAG, 0x40549a78, dl));
5522     return getLimitedPrecisionExp2(t0, dl, DAG);
5523   }
5524 
5525   // No special expansion.
5526   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5527 }
5528 
5529 /// ExpandPowI - Expand a llvm.powi intrinsic.
5530 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5531                           SelectionDAG &DAG) {
5532   // If RHS is a constant, we can expand this out to a multiplication tree if
5533   // it's beneficial on the target, otherwise we end up lowering to a call to
5534   // __powidf2 (for example).
5535   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5536     unsigned Val = RHSC->getSExtValue();
5537 
5538     // powi(x, 0) -> 1.0
5539     if (Val == 0)
5540       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5541 
5542     if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI(
5543             Val, DAG.shouldOptForSize())) {
5544       // Get the exponent as a positive value.
5545       if ((int)Val < 0)
5546         Val = -Val;
5547       // We use the simple binary decomposition method to generate the multiply
5548       // sequence.  There are more optimal ways to do this (for example,
5549       // powi(x,15) generates one more multiply than it should), but this has
5550       // the benefit of being both really simple and much better than a libcall.
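      // For example, Val == 13 (0b1101) yields Res = x * x^4 * x^8: CurSquare
      // is squared on every iteration and multiplied into Res whenever the low
      // bit of Val is set.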
5551       SDValue Res; // Logically starts equal to 1.0
5552       SDValue CurSquare = LHS;
5553       // TODO: Intrinsics should have fast-math-flags that propagate to these
5554       // nodes.
5555       while (Val) {
5556         if (Val & 1) {
5557           if (Res.getNode())
5558             Res =
5559                 DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5560           else
5561             Res = CurSquare; // 1.0*CurSquare.
5562         }
5563 
5564         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5565                                 CurSquare, CurSquare);
5566         Val >>= 1;
5567       }
5568 
5569       // If the original was negative, invert the result, producing 1/(x*x*x).
5570       if (RHSC->getSExtValue() < 0)
5571         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5572                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5573       return Res;
5574     }
5575   }
5576 
5577   // Otherwise, expand to a libcall.
5578   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5579 }
5580 
5581 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5582                             SDValue LHS, SDValue RHS, SDValue Scale,
5583                             SelectionDAG &DAG, const TargetLowering &TLI) {
5584   EVT VT = LHS.getValueType();
5585   bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5586   bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5587   LLVMContext &Ctx = *DAG.getContext();
5588 
5589   // If the type is legal but the operation isn't, this node might survive all
5590   // the way to operation legalization. If we end up there and we do not have
5591   // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5592   // node.
5593 
5594   // Coax the legalizer into expanding the node during type legalization instead
5595   // by bumping the size by one bit. This will force it to Promote, enabling the
5596   // early expansion and avoiding the need to expand later.
5597 
5598   // We don't have to do this if Scale is 0; that can always be expanded, unless
5599   // it's a saturating signed operation. Those can experience true integer
5600   // division overflow, a case which we must avoid.
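  // (For instance, a signed saturating divide with Scale == 0 is an ordinary
  // sdiv, and INT_MIN / -1 overflows; promoting to a wider type avoids that.)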
5601 
5602   // FIXME: We wouldn't have to do this (or any of the early
5603   // expansion/promotion) if it was possible to expand a libcall of an
5604   // illegal type during operation legalization. But it's not, so things
5605   // get a bit hacky.
5606   unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
5607   if ((ScaleInt > 0 || (Saturating && Signed)) &&
5608       (TLI.isTypeLegal(VT) ||
5609        (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5610     TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5611         Opcode, VT, ScaleInt);
5612     if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5613       EVT PromVT;
5614       if (VT.isScalarInteger())
5615         PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5616       else if (VT.isVector()) {
5617         PromVT = VT.getVectorElementType();
5618         PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5619         PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5620       } else
5621         llvm_unreachable("Wrong VT for DIVFIX?");
5622       LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
5623       RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
5624       EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5625       // For saturating operations, we need to shift up the LHS to get the
5626       // proper saturation width, and then shift down again afterwards.
5627       if (Saturating)
5628         LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5629                           DAG.getConstant(1, DL, ShiftTy));
5630       SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5631       if (Saturating)
5632         Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5633                           DAG.getConstant(1, DL, ShiftTy));
5634       return DAG.getZExtOrTrunc(Res, DL, VT);
5635     }
5636   }
5637 
5638   return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5639 }
5640 
5641 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5642 // bitcasted, or split argument. Returns a list of <Register, size in bits>
5643 static void
5644 getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, TypeSize>> &Regs,
5645                      const SDValue &N) {
5646   switch (N.getOpcode()) {
5647   case ISD::CopyFromReg: {
5648     SDValue Op = N.getOperand(1);
5649     Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5650                       Op.getValueType().getSizeInBits());
5651     return;
5652   }
5653   case ISD::BITCAST:
5654   case ISD::AssertZext:
5655   case ISD::AssertSext:
5656   case ISD::TRUNCATE:
5657     getUnderlyingArgRegs(Regs, N.getOperand(0));
5658     return;
5659   case ISD::BUILD_PAIR:
5660   case ISD::BUILD_VECTOR:
5661   case ISD::CONCAT_VECTORS:
5662     for (SDValue Op : N->op_values())
5663       getUnderlyingArgRegs(Regs, Op);
5664     return;
5665   default:
5666     return;
5667   }
5668 }
5669 
5670 /// If the DbgValueInst is a dbg_value of a function argument, create the
5671 /// corresponding DBG_VALUE machine instruction for it now.  At the end of
5672 /// instruction selection, they will be inserted into the entry BB.
5673 /// We don't currently support this for variadic dbg_values, as they shouldn't
5674 /// appear for function arguments or in the prologue.
5675 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5676     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5677     DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
5678   const Argument *Arg = dyn_cast<Argument>(V);
5679   if (!Arg)
5680     return false;
5681 
5682   MachineFunction &MF = DAG.getMachineFunction();
5683   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5684 
5685   // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
5686   // we've been asked to pursue.
5687   auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
5688                               bool Indirect) {
5689     if (Reg.isVirtual() && MF.useDebugInstrRef()) {
5690       // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
5691       // pointing at the VReg, which will be patched up later.
5692       auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
5693       SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
5694           /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
5695           /* isKill */ false, /* isDead */ false,
5696           /* isUndef */ false, /* isEarlyClobber */ false,
5697           /* SubReg */ 0, /* isDebug */ true)});
5698 
5699       auto *NewDIExpr = FragExpr;
5700       // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
5701       // the DIExpression.
5702       if (Indirect)
5703         NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
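      // The resulting DBG_INSTR_REF refers to its machine operands through
      // DW_OP_LLVM_arg; arg 0 is the register operand built above.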
5704       SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
5705       NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
5706       return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
5707     } else {
5708       // Create a completely standard DBG_VALUE.
5709       auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
5710       return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
5711     }
5712   };
5713 
5714   if (Kind == FuncArgumentDbgValueKind::Value) {
5715     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5716     // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5717     // the entry block.
5718     bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5719     if (!IsInEntryBlock)
5720       return false;
5721 
5722     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5723     // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
5724     // variable that also is a parameter of the current function.
5725     //
5726     // However, if we are already at the top of the entry block, we can still
5727     // emit using ArgDbgValue. This might catch some situations when the
5728     // dbg.value refers to an argument that isn't used in the entry block, so
5729     // any CopyToReg node would be optimized out and the only way to express
5730     // this DBG_VALUE is by using the physical reg (or FI) as done in this
5731     // method.
5735     bool VariableIsFunctionInputArg = Variable->isParameter() &&
5736         !DL->getInlinedAt();
5737     bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5738     if (!IsInPrologue && !VariableIsFunctionInputArg)
5739       return false;
5740 
5741     // Here we assume that a function argument on IR level only can be used to
5742     // describe one input parameter on source level. If we for example have
5743     // source code like this
5744     //
5745     //    struct A { long x, y; };
5746     //    void foo(struct A a, long b) {
5747     //      ...
5748     //      b = a.x;
5749     //      ...
5750     //    }
5751     //
5752     // and IR like this
5753     //
5754     //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
5755     //  entry:
5756     //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5757     //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5758     //    call void @llvm.dbg.value(metadata i32 %b, "b",
5759     //    ...
5760     //    call void @llvm.dbg.value(metadata i32 %a1, "b"
5761     //    ...
5762     //
5763     // then the last dbg.value is describing a parameter "b" using a value that
5764     // is an argument. But since we have already used %a1 to describe a parameter,
5765     // we should not handle that last dbg.value here (that would result in an
5766     // incorrect hoisting of the DBG_VALUE to the function entry).
5767     // Notice that we allow one dbg.value per IR level argument, to accommodate
5768     // Note that we allow one dbg.value per IR-level argument, to accommodate
5769     // the fragment situation illustrated above.
5770       unsigned ArgNo = Arg->getArgNo();
5771       if (ArgNo >= FuncInfo.DescribedArgs.size())
5772         FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5773       else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5774         return false;
5775       FuncInfo.DescribedArgs.set(ArgNo);
5776     }
5777   }
5778 
5779   bool IsIndirect = false;
5780   std::optional<MachineOperand> Op;
5781   // Some arguments' frame index is recorded during argument lowering.
5782   int FI = FuncInfo.getArgumentFrameIndex(Arg);
5783   if (FI != std::numeric_limits<int>::max())
5784     Op = MachineOperand::CreateFI(FI);
5785 
5786   SmallVector<std::pair<unsigned, TypeSize>, 8> ArgRegsAndSizes;
5787   if (!Op && N.getNode()) {
5788     getUnderlyingArgRegs(ArgRegsAndSizes, N);
5789     Register Reg;
5790     if (ArgRegsAndSizes.size() == 1)
5791       Reg = ArgRegsAndSizes.front().first;
5792 
5793     if (Reg && Reg.isVirtual()) {
5794       MachineRegisterInfo &RegInfo = MF.getRegInfo();
5795       Register PR = RegInfo.getLiveInPhysReg(Reg);
5796       if (PR)
5797         Reg = PR;
5798     }
5799     if (Reg) {
5800       Op = MachineOperand::CreateReg(Reg, false);
5801       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5802     }
5803   }
5804 
5805   if (!Op && N.getNode()) {
5806     // Check if frame index is available.
5807     SDValue LCandidate = peekThroughBitcasts(N);
5808     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5809       if (FrameIndexSDNode *FINode =
5810           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5811         Op = MachineOperand::CreateFI(FINode->getIndex());
5812   }
5813 
5814   if (!Op) {
5815     // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
5816     auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<unsigned, TypeSize>>
5817                                          SplitRegs) {
5818       unsigned Offset = 0;
5819       for (const auto &RegAndSize : SplitRegs) {
5820         // If the expression is already a fragment, the current register
5821         // offset+size might extend beyond the fragment. In this case, only
5822         // the register bits that are inside the fragment are relevant.
5823         int RegFragmentSizeInBits = RegAndSize.second;
5824         if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5825           uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5826           // The register is entirely outside the expression fragment,
5827           // so is irrelevant for debug info.
5828           if (Offset >= ExprFragmentSizeInBits)
5829             break;
5830           // The register is partially outside the expression fragment, only
5831           // the low bits within the fragment are relevant for debug info.
5832           if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
5833             RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
5834           }
5835         }
5836 
5837         auto FragmentExpr = DIExpression::createFragmentExpression(
5838             Expr, Offset, RegFragmentSizeInBits);
5839         Offset += RegAndSize.second;
5840         // If a valid fragment expression cannot be created, the variable's
5841         // correct value cannot be determined and so it is set as Undef.
5842         if (!FragmentExpr) {
5843           SDDbgValue *SDV = DAG.getConstantDbgValue(
5844               Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
5845           DAG.AddDbgValue(SDV, false);
5846           continue;
5847         }
5848         MachineInstr *NewMI =
5849             MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
5850                              Kind != FuncArgumentDbgValueKind::Value);
5851         FuncInfo.ArgDbgValues.push_back(NewMI);
5852       }
5853     };
5854 
5855     // Check if ValueMap has reg number.
5856     DenseMap<const Value *, Register>::const_iterator
5857       VMI = FuncInfo.ValueMap.find(V);
5858     if (VMI != FuncInfo.ValueMap.end()) {
5859       const auto &TLI = DAG.getTargetLoweringInfo();
5860       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5861                        V->getType(), std::nullopt);
5862       if (RFV.occupiesMultipleRegs()) {
5863         splitMultiRegDbgValue(RFV.getRegsAndSizes());
5864         return true;
5865       }
5866 
5867       Op = MachineOperand::CreateReg(VMI->second, false);
5868       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5869     } else if (ArgRegsAndSizes.size() > 1) {
5870       // This was split due to the calling convention, and no virtual register
5871       // mapping exists for the value.
5872       splitMultiRegDbgValue(ArgRegsAndSizes);
5873       return true;
5874     }
5875   }
5876 
5877   if (!Op)
5878     return false;
5879 
5880   assert(Variable->isValidLocationForIntrinsic(DL) &&
5881          "Expected inlined-at fields to agree");
5882   MachineInstr *NewMI = nullptr;
5883 
5884   if (Op->isReg())
5885     NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
5886   else
5887     NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
5888                     Variable, Expr);
5889 
5890   // Queue it in ArgDbgValues; these are emitted into the entry block later.
5891   FuncInfo.ArgDbgValues.push_back(NewMI);
5892   return true;
5893 }
5894 
5895 /// Return the appropriate SDDbgValue based on N.
5896 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5897                                              DILocalVariable *Variable,
5898                                              DIExpression *Expr,
5899                                              const DebugLoc &dl,
5900                                              unsigned DbgSDNodeOrder) {
5901   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5902     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5903     // stack slot locations.
5904     //
5905     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5906     // debug values here after optimization:
5907     //
5908     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
5909     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5910     //
5911     // Both describe the direct values of their associated variables.
5912     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5913                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5914   }
5915   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5916                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5917 }
5918 
5919 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5920   switch (Intrinsic) {
5921   case Intrinsic::smul_fix:
5922     return ISD::SMULFIX;
5923   case Intrinsic::umul_fix:
5924     return ISD::UMULFIX;
5925   case Intrinsic::smul_fix_sat:
5926     return ISD::SMULFIXSAT;
5927   case Intrinsic::umul_fix_sat:
5928     return ISD::UMULFIXSAT;
5929   case Intrinsic::sdiv_fix:
5930     return ISD::SDIVFIX;
5931   case Intrinsic::udiv_fix:
5932     return ISD::UDIVFIX;
5933   case Intrinsic::sdiv_fix_sat:
5934     return ISD::SDIVFIXSAT;
5935   case Intrinsic::udiv_fix_sat:
5936     return ISD::UDIVFIXSAT;
5937   default:
5938     llvm_unreachable("Unhandled fixed point intrinsic");
5939   }
5940 }
5941 
5942 void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
5943                                            const char *FunctionName) {
5944   assert(FunctionName && "FunctionName must not be nullptr");
5945   SDValue Callee = DAG.getExternalSymbol(
5946       FunctionName,
5947       DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
5948   LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
5949 }
5950 
5951 /// Given a @llvm.call.preallocated.setup, return the corresponding
5952 /// preallocated call.
5953 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
5954   assert(cast<CallBase>(PreallocatedSetup)
5955                  ->getCalledFunction()
5956                  ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
5957          "expected call_preallocated_setup Value");
5958   for (const auto *U : PreallocatedSetup->users()) {
5959     auto *UseCall = cast<CallBase>(U);
5960     const Function *Fn = UseCall->getCalledFunction();
5961     if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
5962       return UseCall;
5963     }
5964   }
5965   llvm_unreachable("expected corresponding call to preallocated setup/arg");
5966 }
5967 
5968 /// If DI is a debug value with an EntryValue expression, lower it using the
5969 /// corresponding physical register of the associated Argument value
5970 /// (guaranteed to exist by the verifier).
5971 bool SelectionDAGBuilder::visitEntryValueDbgValue(const DbgValueInst &DI) {
5972   DILocalVariable *Variable = DI.getVariable();
5973   DIExpression *Expr = DI.getExpression();
5974   if (!Expr->isEntryValue() || !hasSingleElement(DI.getValues()))
5975     return false;
5976 
5977   // These properties are guaranteed by the verifier.
5978   Argument *Arg = cast<Argument>(DI.getValue(0));
5979   assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
5980 
5981   auto ArgIt = FuncInfo.ValueMap.find(Arg);
5982   if (ArgIt == FuncInfo.ValueMap.end()) {
5983     LLVM_DEBUG(
5984         dbgs() << "Dropping dbg.value: expression is entry_value but "
5985                   "couldn't find an associated register for the Argument\n");
5986     return true;
5987   }
5988   Register ArgVReg = ArgIt->getSecond();
5989 
5990   for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
5991     if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
5992       SDDbgValue *SDV =
5993           DAG.getVRegDbgValue(Variable, Expr, PhysReg, false /*IsIndirect*/,
5994                               DI.getDebugLoc(), SDNodeOrder);
5995       DAG.AddDbgValue(SDV, false /*treat as dbg.declare byval parameter*/);
5996       return true;
5997     }
5998   LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
5999                        "couldn't find a physical register\n");
6000   return true;
6001 }
6002 
6003 /// Lower the call to the specified intrinsic function.
6004 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
6005                                              unsigned Intrinsic) {
6006   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6007   SDLoc sdl = getCurSDLoc();
6008   DebugLoc dl = getCurDebugLoc();
6009   SDValue Res;
6010 
6011   SDNodeFlags Flags;
6012   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
6013     Flags.copyFMF(*FPOp);
6014 
6015   switch (Intrinsic) {
6016   default:
6017     // By default, turn this into a target intrinsic node.
6018     visitTargetIntrinsic(I, Intrinsic);
6019     return;
6020   case Intrinsic::vscale: {
6021     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6022     setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
6023     return;
6024   }
6025   case Intrinsic::vastart:  visitVAStart(I); return;
6026   case Intrinsic::vaend:    visitVAEnd(I); return;
6027   case Intrinsic::vacopy:   visitVACopy(I); return;
6028   case Intrinsic::returnaddress:
6029     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
6030                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6031                              getValue(I.getArgOperand(0))));
6032     return;
6033   case Intrinsic::addressofreturnaddress:
6034     setValue(&I,
6035              DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
6036                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6037     return;
6038   case Intrinsic::sponentry:
6039     setValue(&I,
6040              DAG.getNode(ISD::SPONENTRY, sdl,
6041                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6042     return;
6043   case Intrinsic::frameaddress:
6044     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
6045                              TLI.getFrameIndexTy(DAG.getDataLayout()),
6046                              getValue(I.getArgOperand(0))));
6047     return;
6048   case Intrinsic::read_volatile_register:
6049   case Intrinsic::read_register: {
6050     Value *Reg = I.getArgOperand(0);
6051     SDValue Chain = getRoot();
6052     SDValue RegName =
6053         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6054     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6055     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
6056       DAG.getVTList(VT, MVT::Other), Chain, RegName);
6057     setValue(&I, Res);
6058     DAG.setRoot(Res.getValue(1));
6059     return;
6060   }
6061   case Intrinsic::write_register: {
6062     Value *Reg = I.getArgOperand(0);
6063     Value *RegValue = I.getArgOperand(1);
6064     SDValue Chain = getRoot();
6065     SDValue RegName =
6066         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6067     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
6068                             RegName, getValue(RegValue)));
6069     return;
6070   }
6071   case Intrinsic::memcpy: {
6072     const auto &MCI = cast<MemCpyInst>(I);
6073     SDValue Op1 = getValue(I.getArgOperand(0));
6074     SDValue Op2 = getValue(I.getArgOperand(1));
6075     SDValue Op3 = getValue(I.getArgOperand(2));
6076     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
6077     Align DstAlign = MCI.getDestAlign().valueOrOne();
6078     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6079     Align Alignment = std::min(DstAlign, SrcAlign);
6080     bool isVol = MCI.isVolatile();
6081     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6082     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6083     // node.
6084     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6085     SDValue MC = DAG.getMemcpy(
6086         Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6087         /* AlwaysInline */ false, isTC, MachinePointerInfo(I.getArgOperand(0)),
6088         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6089     updateDAGForMaybeTailCall(MC);
6090     return;
6091   }
6092   case Intrinsic::memcpy_inline: {
6093     const auto &MCI = cast<MemCpyInlineInst>(I);
6094     SDValue Dst = getValue(I.getArgOperand(0));
6095     SDValue Src = getValue(I.getArgOperand(1));
6096     SDValue Size = getValue(I.getArgOperand(2));
6097     assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
6098     // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
6099     Align DstAlign = MCI.getDestAlign().valueOrOne();
6100     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6101     Align Alignment = std::min(DstAlign, SrcAlign);
6102     bool isVol = MCI.isVolatile();
6103     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6104     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6105     // node.
6106     SDValue MC = DAG.getMemcpy(
6107         getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
6108         /* AlwaysInline */ true, isTC, MachinePointerInfo(I.getArgOperand(0)),
6109         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6110     updateDAGForMaybeTailCall(MC);
6111     return;
6112   }
6113   case Intrinsic::memset: {
6114     const auto &MSI = cast<MemSetInst>(I);
6115     SDValue Op1 = getValue(I.getArgOperand(0));
6116     SDValue Op2 = getValue(I.getArgOperand(1));
6117     SDValue Op3 = getValue(I.getArgOperand(2));
6118     // @llvm.memset defines 0 and 1 to both mean no alignment.
6119     Align Alignment = MSI.getDestAlign().valueOrOne();
6120     bool isVol = MSI.isVolatile();
6121     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6122     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6123     SDValue MS = DAG.getMemset(
6124         Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
6125         isTC, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
6126     updateDAGForMaybeTailCall(MS);
6127     return;
6128   }
6129   case Intrinsic::memset_inline: {
6130     const auto &MSII = cast<MemSetInlineInst>(I);
6131     SDValue Dst = getValue(I.getArgOperand(0));
6132     SDValue Value = getValue(I.getArgOperand(1));
6133     SDValue Size = getValue(I.getArgOperand(2));
6134     assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
6135     // @llvm.memset defines 0 and 1 to both mean no alignment.
6136     Align DstAlign = MSII.getDestAlign().valueOrOne();
6137     bool isVol = MSII.isVolatile();
6138     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6139     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6140     SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
6141                                /* AlwaysInline */ true, isTC,
6142                                MachinePointerInfo(I.getArgOperand(0)),
6143                                I.getAAMetadata());
6144     updateDAGForMaybeTailCall(MC);
6145     return;
6146   }
6147   case Intrinsic::memmove: {
6148     const auto &MMI = cast<MemMoveInst>(I);
6149     SDValue Op1 = getValue(I.getArgOperand(0));
6150     SDValue Op2 = getValue(I.getArgOperand(1));
6151     SDValue Op3 = getValue(I.getArgOperand(2));
6152     // @llvm.memmove defines 0 and 1 to both mean no alignment.
6153     Align DstAlign = MMI.getDestAlign().valueOrOne();
6154     Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6155     Align Alignment = std::min(DstAlign, SrcAlign);
6156     bool isVol = MMI.isVolatile();
6157     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6158     // FIXME: Support passing different dest/src alignments to the memmove DAG
6159     // node.
6160     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6161     SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6162                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
6163                                 MachinePointerInfo(I.getArgOperand(1)),
6164                                 I.getAAMetadata(), AA);
6165     updateDAGForMaybeTailCall(MM);
6166     return;
6167   }
6168   case Intrinsic::memcpy_element_unordered_atomic: {
6169     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
6170     SDValue Dst = getValue(MI.getRawDest());
6171     SDValue Src = getValue(MI.getRawSource());
6172     SDValue Length = getValue(MI.getLength());
6173 
6174     Type *LengthTy = MI.getLength()->getType();
6175     unsigned ElemSz = MI.getElementSizeInBytes();
6176     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6177     SDValue MC =
6178         DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6179                             isTC, MachinePointerInfo(MI.getRawDest()),
6180                             MachinePointerInfo(MI.getRawSource()));
6181     updateDAGForMaybeTailCall(MC);
6182     return;
6183   }
6184   case Intrinsic::memmove_element_unordered_atomic: {
6185     auto &MI = cast<AtomicMemMoveInst>(I);
6186     SDValue Dst = getValue(MI.getRawDest());
6187     SDValue Src = getValue(MI.getRawSource());
6188     SDValue Length = getValue(MI.getLength());
6189 
6190     Type *LengthTy = MI.getLength()->getType();
6191     unsigned ElemSz = MI.getElementSizeInBytes();
6192     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6193     SDValue MC =
6194         DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6195                              isTC, MachinePointerInfo(MI.getRawDest()),
6196                              MachinePointerInfo(MI.getRawSource()));
6197     updateDAGForMaybeTailCall(MC);
6198     return;
6199   }
6200   case Intrinsic::memset_element_unordered_atomic: {
6201     auto &MI = cast<AtomicMemSetInst>(I);
6202     SDValue Dst = getValue(MI.getRawDest());
6203     SDValue Val = getValue(MI.getValue());
6204     SDValue Length = getValue(MI.getLength());
6205 
6206     Type *LengthTy = MI.getLength()->getType();
6207     unsigned ElemSz = MI.getElementSizeInBytes();
6208     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6209     SDValue MC =
6210         DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
6211                             isTC, MachinePointerInfo(MI.getRawDest()));
6212     updateDAGForMaybeTailCall(MC);
6213     return;
6214   }
6215   case Intrinsic::call_preallocated_setup: {
6216     const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6217     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6218     SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6219                               getRoot(), SrcValue);
6220     setValue(&I, Res);
6221     DAG.setRoot(Res);
6222     return;
6223   }
6224   case Intrinsic::call_preallocated_arg: {
6225     const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6226     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6227     SDValue Ops[3];
6228     Ops[0] = getRoot();
6229     Ops[1] = SrcValue;
6230     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6231                                    MVT::i32); // arg index
6232     SDValue Res = DAG.getNode(
6233         ISD::PREALLOCATED_ARG, sdl,
6234         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6235     setValue(&I, Res);
6236     DAG.setRoot(Res.getValue(1));
6237     return;
6238   }
6239   case Intrinsic::dbg_declare: {
6240     const auto &DI = cast<DbgDeclareInst>(I);
6241     // Debug intrinsics are handled separately in assignment tracking mode.
6242     // Some intrinsics are handled right after Argument lowering.
6243     if (AssignmentTrackingEnabled ||
6244         FuncInfo.PreprocessedDbgDeclares.count(&DI))
6245       return;
6246     // Assume dbg.declare cannot currently use DIArgList, i.e.
6247     // it is non-variadic.
6248     assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
6249     DILocalVariable *Variable = DI.getVariable();
6250     DIExpression *Expression = DI.getExpression();
6251     dropDanglingDebugInfo(Variable, Expression);
6252     assert(Variable && "Missing variable");
6253     LLVM_DEBUG(dbgs() << "SelectionDAG visiting debug intrinsic: " << DI
6254                       << "\n");
6255     // Check if address has undef value.
6256     const Value *Address = DI.getVariableLocationOp(0);
6257     if (!Address || isa<UndefValue>(Address) ||
6258         (Address->use_empty() && !isa<Argument>(Address))) {
6259       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
6260                         << " (bad/undef/unused-arg address)\n");
6261       return;
6262     }
6263 
6264     bool isParameter = Variable->isParameter() || isa<Argument>(Address);
6265 
6266     SDValue &N = NodeMap[Address];
6267     if (!N.getNode() && isa<Argument>(Address))
6268       // Check unused arguments map.
6269       N = UnusedArgNodeMap[Address];
6270     SDDbgValue *SDV;
6271     if (N.getNode()) {
6272       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
6273         Address = BCI->getOperand(0);
6274       // Parameters are handled specially.
6275       auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
6276       if (isParameter && FINode) {
6277         // Byval parameter. We have a frame index at this point.
6278         SDV =
6279             DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
6280                                       /*IsIndirect*/ true, dl, SDNodeOrder);
6281       } else if (isa<Argument>(Address)) {
6282         // Address is an argument, so try to emit its dbg value using
6283         // virtual register info from the FuncInfo.ValueMap.
6284         EmitFuncArgumentDbgValue(Address, Variable, Expression, dl,
6285                                  FuncArgumentDbgValueKind::Declare, N);
6286         return;
6287       } else {
6288         SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
6289                               true, dl, SDNodeOrder);
6290       }
6291       DAG.AddDbgValue(SDV, isParameter);
6292     } else {
6293       // If Address is an argument then try to emit its dbg value using
6294       // virtual register info from the FuncInfo.ValueMap.
6295       if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl,
6296                                     FuncArgumentDbgValueKind::Declare, N)) {
6297         LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
6298                           << " (could not emit func-arg dbg_value)\n");
6299       }
6300     }
6301     return;
6302   }
6303   case Intrinsic::dbg_label: {
6304     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
6305     DILabel *Label = DI.getLabel();
6306     assert(Label && "Missing label");
6307 
6308     SDDbgLabel *SDV;
6309     SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
6310     DAG.AddDbgLabel(SDV);
6311     return;
6312   }
6313   case Intrinsic::dbg_assign: {
6314     // Debug intrinsics are handled separately in assignment tracking mode.
6315     if (AssignmentTrackingEnabled)
6316       return;
6317     // If assignment tracking hasn't been enabled then fall through and treat
6318     // the dbg.assign as a dbg.value.
6319     [[fallthrough]];
6320   }
6321   case Intrinsic::dbg_value: {
6322     // Debug intrinsics are handled separately in assignment tracking mode.
6323     if (AssignmentTrackingEnabled)
6324       return;
6325     const DbgValueInst &DI = cast<DbgValueInst>(I);
6326     assert(DI.getVariable() && "Missing variable");
6327 
6328     DILocalVariable *Variable = DI.getVariable();
6329     DIExpression *Expression = DI.getExpression();
6330     dropDanglingDebugInfo(Variable, Expression);
6331 
6332     if (visitEntryValueDbgValue(DI))
6333       return;
6334 
6335     if (DI.isKillLocation()) {
6336       handleKillDebugValue(Variable, Expression, DI.getDebugLoc(), SDNodeOrder);
6337       return;
6338     }
6339 
6340     SmallVector<Value *, 4> Values(DI.getValues());
6341     if (Values.empty())
6342       return;
6343 
6344     bool IsVariadic = DI.hasArgList();
6345     if (!handleDebugValue(Values, Variable, Expression, DI.getDebugLoc(),
6346                           SDNodeOrder, IsVariadic))
6347       addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
6348                            DI.getDebugLoc(), SDNodeOrder);
6349     return;
6350   }
6351 
6352   case Intrinsic::eh_typeid_for: {
6353     // Find the type id for the given typeinfo.
6354     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6355     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6356     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6357     setValue(&I, Res);
6358     return;
6359   }
6360 
6361   case Intrinsic::eh_return_i32:
6362   case Intrinsic::eh_return_i64:
6363     DAG.getMachineFunction().setCallsEHReturn(true);
6364     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6365                             MVT::Other,
6366                             getControlRoot(),
6367                             getValue(I.getArgOperand(0)),
6368                             getValue(I.getArgOperand(1))));
6369     return;
6370   case Intrinsic::eh_unwind_init:
6371     DAG.getMachineFunction().setCallsUnwindInit(true);
6372     return;
6373   case Intrinsic::eh_dwarf_cfa:
6374     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6375                              TLI.getPointerTy(DAG.getDataLayout()),
6376                              getValue(I.getArgOperand(0))));
6377     return;
6378   case Intrinsic::eh_sjlj_callsite: {
6379     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6380     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
6381     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
6382 
6383     MMI.setCurrentCallSite(CI->getZExtValue());
6384     return;
6385   }
6386   case Intrinsic::eh_sjlj_functioncontext: {
6387     // Get and store the index of the function context.
6388     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6389     AllocaInst *FnCtx =
6390       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6391     int FI = FuncInfo.StaticAllocaMap[FnCtx];
6392     MFI.setFunctionContextIndex(FI);
6393     return;
6394   }
6395   case Intrinsic::eh_sjlj_setjmp: {
6396     SDValue Ops[2];
6397     Ops[0] = getRoot();
6398     Ops[1] = getValue(I.getArgOperand(0));
6399     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6400                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
6401     setValue(&I, Op.getValue(0));
6402     DAG.setRoot(Op.getValue(1));
6403     return;
6404   }
6405   case Intrinsic::eh_sjlj_longjmp:
6406     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6407                             getRoot(), getValue(I.getArgOperand(0))));
6408     return;
6409   case Intrinsic::eh_sjlj_setup_dispatch:
6410     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6411                             getRoot()));
6412     return;
6413   case Intrinsic::masked_gather:
6414     visitMaskedGather(I);
6415     return;
6416   case Intrinsic::masked_load:
6417     visitMaskedLoad(I);
6418     return;
6419   case Intrinsic::masked_scatter:
6420     visitMaskedScatter(I);
6421     return;
6422   case Intrinsic::masked_store:
6423     visitMaskedStore(I);
6424     return;
6425   case Intrinsic::masked_expandload:
6426     visitMaskedLoad(I, true /* IsExpanding */);
6427     return;
6428   case Intrinsic::masked_compressstore:
6429     visitMaskedStore(I, true /* IsCompressing */);
6430     return;
6431   case Intrinsic::powi:
6432     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6433                             getValue(I.getArgOperand(1)), DAG));
6434     return;
6435   case Intrinsic::log:
6436     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6437     return;
6438   case Intrinsic::log2:
6439     setValue(&I,
6440              expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6441     return;
6442   case Intrinsic::log10:
6443     setValue(&I,
6444              expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6445     return;
6446   case Intrinsic::exp:
6447     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6448     return;
6449   case Intrinsic::exp2:
6450     setValue(&I,
6451              expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6452     return;
6453   case Intrinsic::pow:
6454     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6455                            getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6456     return;
6457   case Intrinsic::sqrt:
6458   case Intrinsic::fabs:
6459   case Intrinsic::sin:
6460   case Intrinsic::cos:
6461   case Intrinsic::exp10:
6462   case Intrinsic::floor:
6463   case Intrinsic::ceil:
6464   case Intrinsic::trunc:
6465   case Intrinsic::rint:
6466   case Intrinsic::nearbyint:
6467   case Intrinsic::round:
6468   case Intrinsic::roundeven:
6469   case Intrinsic::canonicalize: {
6470     unsigned Opcode;
6471     switch (Intrinsic) {
6472     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6473     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
6474     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
6475     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
6476     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
6477     case Intrinsic::exp10:     Opcode = ISD::FEXP10;     break;
6478     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
6479     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
6480     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
6481     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
6482     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6483     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
6484     case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6485     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6486     }
6487 
6488     setValue(&I, DAG.getNode(Opcode, sdl,
6489                              getValue(I.getArgOperand(0)).getValueType(),
6490                              getValue(I.getArgOperand(0)), Flags));
6491     return;
6492   }
6493   case Intrinsic::lround:
6494   case Intrinsic::llround:
6495   case Intrinsic::lrint:
6496   case Intrinsic::llrint: {
6497     unsigned Opcode;
6498     switch (Intrinsic) {
6499     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6500     case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
6501     case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6502     case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
6503     case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
6504     }
6505 
6506     EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6507     setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6508                              getValue(I.getArgOperand(0))));
6509     return;
6510   }
6511   case Intrinsic::minnum:
6512     setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6513                              getValue(I.getArgOperand(0)).getValueType(),
6514                              getValue(I.getArgOperand(0)),
6515                              getValue(I.getArgOperand(1)), Flags));
6516     return;
6517   case Intrinsic::maxnum:
6518     setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6519                              getValue(I.getArgOperand(0)).getValueType(),
6520                              getValue(I.getArgOperand(0)),
6521                              getValue(I.getArgOperand(1)), Flags));
6522     return;
6523   case Intrinsic::minimum:
6524     setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6525                              getValue(I.getArgOperand(0)).getValueType(),
6526                              getValue(I.getArgOperand(0)),
6527                              getValue(I.getArgOperand(1)), Flags));
6528     return;
6529   case Intrinsic::maximum:
6530     setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6531                              getValue(I.getArgOperand(0)).getValueType(),
6532                              getValue(I.getArgOperand(0)),
6533                              getValue(I.getArgOperand(1)), Flags));
6534     return;
6535   case Intrinsic::copysign:
6536     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6537                              getValue(I.getArgOperand(0)).getValueType(),
6538                              getValue(I.getArgOperand(0)),
6539                              getValue(I.getArgOperand(1)), Flags));
6540     return;
6541   case Intrinsic::ldexp:
6542     setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
6543                              getValue(I.getArgOperand(0)).getValueType(),
6544                              getValue(I.getArgOperand(0)),
6545                              getValue(I.getArgOperand(1)), Flags));
6546     return;
6547   case Intrinsic::frexp: {
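         // llvm.frexp returns a { fraction, exponent } pair; compute the value
         // types of both members and lower them with a single two-result FFREXP.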
6548     SmallVector<EVT, 2> ValueVTs;
6549     ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
6550     SDVTList VTs = DAG.getVTList(ValueVTs);
6551     setValue(&I,
6552              DAG.getNode(ISD::FFREXP, sdl, VTs, getValue(I.getArgOperand(0))));
6553     return;
6554   }
6555   case Intrinsic::arithmetic_fence: {
6556     setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
6557                              getValue(I.getArgOperand(0)).getValueType(),
6558                              getValue(I.getArgOperand(0)), Flags));
6559     return;
6560   }
6561   case Intrinsic::fma:
6562     setValue(&I, DAG.getNode(
6563                      ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
6564                      getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
6565                      getValue(I.getArgOperand(2)), Flags));
6566     return;
6567 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
6568   case Intrinsic::INTRINSIC:
6569 #include "llvm/IR/ConstrainedOps.def"
6570     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6571     return;
6572 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6573 #include "llvm/IR/VPIntrinsics.def"
6574     visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
6575     return;
6576   case Intrinsic::fptrunc_round: {
6577     // Get the last argument (the metadata) and convert it to an integer in
6578     // the call.
6579     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
6580     std::optional<RoundingMode> RoundMode =
6581         convertStrToRoundingMode(cast<MDString>(MD)->getString());
6582 
6583     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6584 
6585     // Propagate fast-math-flags from IR to node(s).
6586     SDNodeFlags Flags;
6587     Flags.copyFMF(*cast<FPMathOperator>(&I));
6588     SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
6589 
6590     SDValue Result;
6591     Result = DAG.getNode(
6592         ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
6593         DAG.getTargetConstant((int)*RoundMode, sdl,
6594                               TLI.getPointerTy(DAG.getDataLayout())));
6595     setValue(&I, Result);
6596 
6597     return;
6598   }
6599   case Intrinsic::fmuladd: {
6600     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
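         // Form a single fused FMA when FP op fusion is allowed and the target
         // reports FMA is faster than separate FMUL and FADD; otherwise emit the
         // two separate nodes.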
6601     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6602         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6603       setValue(&I, DAG.getNode(ISD::FMA, sdl,
6604                                getValue(I.getArgOperand(0)).getValueType(),
6605                                getValue(I.getArgOperand(0)),
6606                                getValue(I.getArgOperand(1)),
6607                                getValue(I.getArgOperand(2)), Flags));
6608     } else {
6609       // TODO: Intrinsic calls should have fast-math-flags.
6610       SDValue Mul = DAG.getNode(
6611           ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
6612           getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
6613       SDValue Add = DAG.getNode(ISD::FADD, sdl,
6614                                 getValue(I.getArgOperand(0)).getValueType(),
6615                                 Mul, getValue(I.getArgOperand(2)), Flags);
6616       setValue(&I, Add);
6617     }
6618     return;
6619   }
6620   case Intrinsic::convert_to_fp16:
6621     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6622                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6623                                          getValue(I.getArgOperand(0)),
6624                                          DAG.getTargetConstant(0, sdl,
6625                                                                MVT::i32))));
6626     return;
6627   case Intrinsic::convert_from_fp16:
6628     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6629                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6630                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
6631                                          getValue(I.getArgOperand(0)))));
6632     return;
6633   case Intrinsic::fptosi_sat: {
6634     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6635     setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
6636                              getValue(I.getArgOperand(0)),
6637                              DAG.getValueType(VT.getScalarType())));
6638     return;
6639   }
6640   case Intrinsic::fptoui_sat: {
6641     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6642     setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
6643                              getValue(I.getArgOperand(0)),
6644                              DAG.getValueType(VT.getScalarType())));
6645     return;
6646   }
6647   case Intrinsic::set_rounding:
6648     Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
6649                       {getRoot(), getValue(I.getArgOperand(0))});
6650     setValue(&I, Res);
6651     DAG.setRoot(Res.getValue(0));
6652     return;
6653   case Intrinsic::is_fpclass: {
6654     const DataLayout DLayout = DAG.getDataLayout();
6655     EVT DestVT = TLI.getValueType(DLayout, I.getType());
6656     EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
6657     FPClassTest Test = static_cast<FPClassTest>(
6658         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
6659     MachineFunction &MF = DAG.getMachineFunction();
6660     const Function &F = MF.getFunction();
6661     SDValue Op = getValue(I.getArgOperand(0));
6662     SDNodeFlags Flags;
6663     Flags.setNoFPExcept(
6664         !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
6665     // If ISD::IS_FPCLASS should be expanded, do it right now, because the
6666     // expansion can use illegal types. Doing the expansion early allows these
6667     // types to be legalized prior to selection.
6668     if (!TLI.isOperationLegalOrCustom(ISD::IS_FPCLASS, ArgVT)) {
6669       SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
6670       setValue(&I, Result);
6671       return;
6672     }
6673 
6674     SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
6675     SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
6676     setValue(&I, V);
6677     return;
6678   }
6679   case Intrinsic::get_fpenv: {
6680     const DataLayout DLayout = DAG.getDataLayout();
6681     EVT EnvVT = TLI.getValueType(DLayout, I.getType());
6682     Align TempAlign = DAG.getEVTAlign(EnvVT);
6683     SDValue Chain = getRoot();
6684     // Use GET_FPENV if it is legal or custom. Otherwise use the memory-based
6685     // node with temporary storage on the stack.
6686     if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
6687       Res = DAG.getNode(
6688           ISD::GET_FPENV, sdl,
6689           DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6690                         MVT::Other),
6691           Chain);
6692     } else {
6693       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6694       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6695       auto MPI =
6696           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6697       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6698           MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize,
6699           TempAlign);
6700       Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6701       Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
6702     }
6703     setValue(&I, Res);
6704     DAG.setRoot(Res.getValue(1));
6705     return;
6706   }
6707   case Intrinsic::set_fpenv: {
6708     const DataLayout DLayout = DAG.getDataLayout();
6709     SDValue Env = getValue(I.getArgOperand(0));
6710     EVT EnvVT = Env.getValueType();
6711     Align TempAlign = DAG.getEVTAlign(EnvVT);
6712     SDValue Chain = getRoot();
6713     // If SET_FPENV is custom or legal, use it. Otherwise load the
6714     // environment from memory.
6715     if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
6716       Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
6717     } else {
6718       // Allocate space on the stack, copy the environment bits into it, and
6719       // use this memory in SET_FPENV_MEM.
6720       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6721       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6722       auto MPI =
6723           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6724       Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
6725                            MachineMemOperand::MOStore);
6726       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6727           MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize,
6728           TempAlign);
6729       Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6730     }
6731     DAG.setRoot(Chain);
6732     return;
6733   }
6734   case Intrinsic::reset_fpenv:
6735     DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
6736     return;
6737   case Intrinsic::get_fpmode:
6738     Res = DAG.getNode(
6739         ISD::GET_FPMODE, sdl,
6740         DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6741                       MVT::Other),
6742         DAG.getRoot());
6743     setValue(&I, Res);
6744     DAG.setRoot(Res.getValue(1));
6745     return;
6746   case Intrinsic::set_fpmode:
6747     Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
6748                       getValue(I.getArgOperand(0)));
6749     DAG.setRoot(Res);
6750     return;
6751   case Intrinsic::reset_fpmode: {
6752     Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
6753     DAG.setRoot(Res);
6754     return;
6755   }
6756   case Intrinsic::pcmarker: {
6757     SDValue Tmp = getValue(I.getArgOperand(0));
6758     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
6759     return;
6760   }
6761   case Intrinsic::readcyclecounter: {
6762     SDValue Op = getRoot();
6763     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6764                       DAG.getVTList(MVT::i64, MVT::Other), Op);
6765     setValue(&I, Res);
6766     DAG.setRoot(Res.getValue(1));
6767     return;
6768   }
6769   case Intrinsic::bitreverse:
6770     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6771                              getValue(I.getArgOperand(0)).getValueType(),
6772                              getValue(I.getArgOperand(0))));
6773     return;
6774   case Intrinsic::bswap:
6775     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6776                              getValue(I.getArgOperand(0)).getValueType(),
6777                              getValue(I.getArgOperand(0))));
6778     return;
6779   case Intrinsic::cttz: {
6780     SDValue Arg = getValue(I.getArgOperand(0));
6781     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6782     EVT Ty = Arg.getValueType();
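         // The second operand tells whether a zero input is poison; if so, the
         // CTTZ_ZERO_UNDEF form may be used.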
6783     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6784                              sdl, Ty, Arg));
6785     return;
6786   }
6787   case Intrinsic::ctlz: {
6788     SDValue Arg = getValue(I.getArgOperand(0));
6789     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6790     EVT Ty = Arg.getValueType();
6791     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6792                              sdl, Ty, Arg));
6793     return;
6794   }
6795   case Intrinsic::ctpop: {
6796     SDValue Arg = getValue(I.getArgOperand(0));
6797     EVT Ty = Arg.getValueType();
6798     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6799     return;
6800   }
6801   case Intrinsic::fshl:
6802   case Intrinsic::fshr: {
6803     bool IsFSHL = Intrinsic == Intrinsic::fshl;
6804     SDValue X = getValue(I.getArgOperand(0));
6805     SDValue Y = getValue(I.getArgOperand(1));
6806     SDValue Z = getValue(I.getArgOperand(2));
6807     EVT VT = X.getValueType();
6808 
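         // A funnel shift of a value with itself is a rotate.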
6809     if (X == Y) {
6810       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6811       setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6812     } else {
6813       auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6814       setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6815     }
6816     return;
6817   }
6818   case Intrinsic::sadd_sat: {
6819     SDValue Op1 = getValue(I.getArgOperand(0));
6820     SDValue Op2 = getValue(I.getArgOperand(1));
6821     setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6822     return;
6823   }
6824   case Intrinsic::uadd_sat: {
6825     SDValue Op1 = getValue(I.getArgOperand(0));
6826     SDValue Op2 = getValue(I.getArgOperand(1));
6827     setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6828     return;
6829   }
6830   case Intrinsic::ssub_sat: {
6831     SDValue Op1 = getValue(I.getArgOperand(0));
6832     SDValue Op2 = getValue(I.getArgOperand(1));
6833     setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6834     return;
6835   }
6836   case Intrinsic::usub_sat: {
6837     SDValue Op1 = getValue(I.getArgOperand(0));
6838     SDValue Op2 = getValue(I.getArgOperand(1));
6839     setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6840     return;
6841   }
6842   case Intrinsic::sshl_sat: {
6843     SDValue Op1 = getValue(I.getArgOperand(0));
6844     SDValue Op2 = getValue(I.getArgOperand(1));
6845     setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6846     return;
6847   }
6848   case Intrinsic::ushl_sat: {
6849     SDValue Op1 = getValue(I.getArgOperand(0));
6850     SDValue Op2 = getValue(I.getArgOperand(1));
6851     setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6852     return;
6853   }
6854   case Intrinsic::smul_fix:
6855   case Intrinsic::umul_fix:
6856   case Intrinsic::smul_fix_sat:
6857   case Intrinsic::umul_fix_sat: {
6858     SDValue Op1 = getValue(I.getArgOperand(0));
6859     SDValue Op2 = getValue(I.getArgOperand(1));
6860     SDValue Op3 = getValue(I.getArgOperand(2));
6861     setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6862                              Op1.getValueType(), Op1, Op2, Op3));
6863     return;
6864   }
6865   case Intrinsic::sdiv_fix:
6866   case Intrinsic::udiv_fix:
6867   case Intrinsic::sdiv_fix_sat:
6868   case Intrinsic::udiv_fix_sat: {
6869     SDValue Op1 = getValue(I.getArgOperand(0));
6870     SDValue Op2 = getValue(I.getArgOperand(1));
6871     SDValue Op3 = getValue(I.getArgOperand(2));
6872     setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6873                               Op1, Op2, Op3, DAG, TLI));
6874     return;
6875   }
6876   case Intrinsic::smax: {
6877     SDValue Op1 = getValue(I.getArgOperand(0));
6878     SDValue Op2 = getValue(I.getArgOperand(1));
6879     setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
6880     return;
6881   }
6882   case Intrinsic::smin: {
6883     SDValue Op1 = getValue(I.getArgOperand(0));
6884     SDValue Op2 = getValue(I.getArgOperand(1));
6885     setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
6886     return;
6887   }
6888   case Intrinsic::umax: {
6889     SDValue Op1 = getValue(I.getArgOperand(0));
6890     SDValue Op2 = getValue(I.getArgOperand(1));
6891     setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
6892     return;
6893   }
6894   case Intrinsic::umin: {
6895     SDValue Op1 = getValue(I.getArgOperand(0));
6896     SDValue Op2 = getValue(I.getArgOperand(1));
6897     setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
6898     return;
6899   }
6900   case Intrinsic::abs: {
6901     // TODO: Preserve "int min is poison" arg in SDAG?
6902     SDValue Op1 = getValue(I.getArgOperand(0));
6903     setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
6904     return;
6905   }
6906   case Intrinsic::stacksave: {
6907     SDValue Op = getRoot();
6908     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6909     Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
6910     setValue(&I, Res);
6911     DAG.setRoot(Res.getValue(1));
6912     return;
6913   }
6914   case Intrinsic::stackrestore:
6915     Res = getValue(I.getArgOperand(0));
6916     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6917     return;
6918   case Intrinsic::get_dynamic_area_offset: {
6919     SDValue Op = getRoot();
6920     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6921     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6922     // The result type for @llvm.get.dynamic.area.offset should match the
6923     // target's PtrTy.
6924     if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
6925       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6926                          " intrinsic!");
6927     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6928                       Op);
6929     DAG.setRoot(Op);
6930     setValue(&I, Res);
6931     return;
6932   }
6933   case Intrinsic::stackguard: {
6934     MachineFunction &MF = DAG.getMachineFunction();
6935     const Module &M = *MF.getFunction().getParent();
6936     SDValue Chain = getRoot();
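         // Either use the target's dedicated LOAD_STACK_GUARD node or emit a
         // volatile load of the stack-guard global chosen by the target.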
6937     if (TLI.useLoadStackGuardNode()) {
6938       Res = getLoadStackGuard(DAG, sdl, Chain);
6939     } else {
6940       EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6941       const Value *Global = TLI.getSDagStackGuard(M);
6942       Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
6943       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6944                         MachinePointerInfo(Global, 0), Align,
6945                         MachineMemOperand::MOVolatile);
6946     }
6947     if (TLI.useStackGuardXorFP())
6948       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6949     DAG.setRoot(Chain);
6950     setValue(&I, Res);
6951     return;
6952   }
6953   case Intrinsic::stackprotector: {
6954     // Emit code into the DAG to store the stack guard onto the stack.
6955     MachineFunction &MF = DAG.getMachineFunction();
6956     MachineFrameInfo &MFI = MF.getFrameInfo();
6957     SDValue Src, Chain = getRoot();
6958 
6959     if (TLI.useLoadStackGuardNode())
6960       Src = getLoadStackGuard(DAG, sdl, Chain);
6961     else
6962       Src = getValue(I.getArgOperand(0));   // The guard's value.
6963 
6964     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
6965 
6966     int FI = FuncInfo.StaticAllocaMap[Slot];
6967     MFI.setStackProtectorIndex(FI);
6968     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6969 
6970     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
6971 
6972     // Store the stack protector onto the stack.
6973     Res = DAG.getStore(
6974         Chain, sdl, Src, FIN,
6975         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
6976         MaybeAlign(), MachineMemOperand::MOVolatile);
6977     setValue(&I, Res);
6978     DAG.setRoot(Res);
6979     return;
6980   }
6981   case Intrinsic::objectsize:
6982     llvm_unreachable("llvm.objectsize.* should have been lowered already");
6983 
6984   case Intrinsic::is_constant:
6985     llvm_unreachable("llvm.is.constant.* should have been lowered already");
6986 
6987   case Intrinsic::annotation:
6988   case Intrinsic::ptr_annotation:
6989   case Intrinsic::launder_invariant_group:
6990   case Intrinsic::strip_invariant_group:
6991     // Drop the intrinsic, but forward the value.
6992     setValue(&I, getValue(I.getOperand(0)));
6993     return;
6994 
6995   case Intrinsic::assume:
6996   case Intrinsic::experimental_noalias_scope_decl:
6997   case Intrinsic::var_annotation:
6998   case Intrinsic::sideeffect:
6999     // Discard annotate attributes, noalias scope declarations, assumptions, and
7000     // artificial side-effects.
7001     return;
7002 
7003   case Intrinsic::codeview_annotation: {
7004     // Emit a label associated with this metadata.
7005     MachineFunction &MF = DAG.getMachineFunction();
7006     MCSymbol *Label =
7007         MF.getMMI().getContext().createTempSymbol("annotation", true);
7008     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
7009     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
7010     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
7011     DAG.setRoot(Res);
7012     return;
7013   }
7014 
7015   case Intrinsic::init_trampoline: {
7016     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
7017 
7018     SDValue Ops[6];
7019     Ops[0] = getRoot();
7020     Ops[1] = getValue(I.getArgOperand(0));
7021     Ops[2] = getValue(I.getArgOperand(1));
7022     Ops[3] = getValue(I.getArgOperand(2));
7023     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
7024     Ops[5] = DAG.getSrcValue(F);
7025 
7026     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
7027 
7028     DAG.setRoot(Res);
7029     return;
7030   }
7031   case Intrinsic::adjust_trampoline:
7032     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
7033                              TLI.getPointerTy(DAG.getDataLayout()),
7034                              getValue(I.getArgOperand(0))));
7035     return;
7036   case Intrinsic::gcroot: {
7037     assert(DAG.getMachineFunction().getFunction().hasGC() &&
7038            "only valid in functions with gc specified, enforced by Verifier");
7039     assert(GFI && "implied by previous");
7040     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
7041     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
7042 
7043     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
7044     GFI->addStackRoot(FI->getIndex(), TypeMap);
7045     return;
7046   }
7047   case Intrinsic::gcread:
7048   case Intrinsic::gcwrite:
7049     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
7050   case Intrinsic::get_rounding:
7051     Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
7052     setValue(&I, Res);
7053     DAG.setRoot(Res.getValue(1));
7054     return;
7055 
7056   case Intrinsic::expect:
7057     // Just replace __builtin_expect(exp, c) with EXP.
7058     setValue(&I, getValue(I.getArgOperand(0)));
7059     return;
7060 
7061   case Intrinsic::ubsantrap:
7062   case Intrinsic::debugtrap:
7063   case Intrinsic::trap: {
7064     StringRef TrapFuncName =
7065         I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
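         // With no "trap-func-name" attribute, emit the corresponding trap node;
         // otherwise lower the intrinsic to a call to the named trap function.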
7066     if (TrapFuncName.empty()) {
7067       switch (Intrinsic) {
7068       case Intrinsic::trap:
7069         DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
7070         break;
7071       case Intrinsic::debugtrap:
7072         DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
7073         break;
7074       case Intrinsic::ubsantrap:
7075         DAG.setRoot(DAG.getNode(
7076             ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
7077             DAG.getTargetConstant(
7078                 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7079                 MVT::i32)));
7080         break;
7081       default: llvm_unreachable("unknown trap intrinsic");
7082       }
7083       return;
7084     }
7085     TargetLowering::ArgListTy Args;
7086     if (Intrinsic == Intrinsic::ubsantrap) {
7087       Args.push_back(TargetLoweringBase::ArgListEntry());
7088       Args[0].Val = I.getArgOperand(0);
7089       Args[0].Node = getValue(Args[0].Val);
7090       Args[0].Ty = Args[0].Val->getType();
7091     }
7092 
7093     TargetLowering::CallLoweringInfo CLI(DAG);
7094     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
7095         CallingConv::C, I.getType(),
7096         DAG.getExternalSymbol(TrapFuncName.data(),
7097                               TLI.getPointerTy(DAG.getDataLayout())),
7098         std::move(Args));
7099 
7100     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7101     DAG.setRoot(Result.second);
7102     return;
7103   }
7104 
7105   case Intrinsic::uadd_with_overflow:
7106   case Intrinsic::sadd_with_overflow:
7107   case Intrinsic::usub_with_overflow:
7108   case Intrinsic::ssub_with_overflow:
7109   case Intrinsic::umul_with_overflow:
7110   case Intrinsic::smul_with_overflow: {
7111     ISD::NodeType Op;
7112     switch (Intrinsic) {
7113     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7114     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
7115     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
7116     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
7117     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
7118     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
7119     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
7120     }
7121     SDValue Op1 = getValue(I.getArgOperand(0));
7122     SDValue Op2 = getValue(I.getArgOperand(1));
7123 
7124     EVT ResultVT = Op1.getValueType();
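         // The overflow result is an i1, or a vector of i1 matching the element
         // count when the operands are vectors.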
7125     EVT OverflowVT = MVT::i1;
7126     if (ResultVT.isVector())
7127       OverflowVT = EVT::getVectorVT(
7128           *Context, OverflowVT, ResultVT.getVectorElementCount());
7129 
7130     SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
7131     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
7132     return;
7133   }
7134   case Intrinsic::prefetch: {
7135     SDValue Ops[5];
7136     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7137     auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
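         // Operands: chain, the address, and the rw, locality and cache-type
         // arguments lowered as i32 target constants.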
7138     Ops[0] = DAG.getRoot();
7139     Ops[1] = getValue(I.getArgOperand(0));
7140     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
7141                                    MVT::i32);
7142     Ops[3] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(2)), sdl,
7143                                    MVT::i32);
7144     Ops[4] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(3)), sdl,
7145                                    MVT::i32);
7146     SDValue Result = DAG.getMemIntrinsicNode(
7147         ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
7148         EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
7149         /* align */ std::nullopt, Flags);
7150 
7151     // Chain the prefetch in parallel with any pending loads, to stay out of
7152     // the way of later optimizations.
7153     PendingLoads.push_back(Result);
7154     Result = getRoot();
7155     DAG.setRoot(Result);
7156     return;
7157   }
7158   case Intrinsic::lifetime_start:
7159   case Intrinsic::lifetime_end: {
7160     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
7161     // Stack coloring is not enabled in O0, discard region information.
7162     if (TM.getOptLevel() == CodeGenOptLevel::None)
7163       return;
7164 
7165     const int64_t ObjectSize =
7166         cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
7167     Value *const ObjectPtr = I.getArgOperand(1);
7168     SmallVector<const Value *, 4> Allocas;
7169     getUnderlyingObjects(ObjectPtr, Allocas);
7170 
7171     for (const Value *Alloca : Allocas) {
7172       const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7173 
7174       // Could not find an Alloca.
7175       if (!LifetimeObject)
7176         continue;
7177 
7178       // First check that the Alloca is static, otherwise it won't have a
7179       // valid frame index.
7180       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
7181       if (SI == FuncInfo.StaticAllocaMap.end())
7182         return;
7183 
7184       const int FrameIndex = SI->second;
7185       int64_t Offset;
7186       if (GetPointerBaseWithConstantOffset(
7187               ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
7188         Offset = -1; // Cannot determine offset from alloca to lifetime object.
7189       Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
7190                                 Offset);
7191       DAG.setRoot(Res);
7192     }
7193     return;
7194   }
7195   case Intrinsic::pseudoprobe: {
7196     auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7197     auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7198     auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7199     Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
7200     DAG.setRoot(Res);
7201     return;
7202   }
7203   case Intrinsic::invariant_start:
7204     // Discard region information.
7205     setValue(&I,
7206              DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType())));
7207     return;
7208   case Intrinsic::invariant_end:
7209     // Discard region information.
7210     return;
7211   case Intrinsic::clear_cache:
7212     // FunctionName may be null.
7213     if (const char *FunctionName = TLI.getClearCacheBuiltinName())
7214       lowerCallToExternalSymbol(I, FunctionName);
7215     return;
7216   case Intrinsic::donothing:
7217   case Intrinsic::seh_try_begin:
7218   case Intrinsic::seh_scope_begin:
7219   case Intrinsic::seh_try_end:
7220   case Intrinsic::seh_scope_end:
7221     // ignore
7222     return;
7223   case Intrinsic::experimental_stackmap:
7224     visitStackmap(I);
7225     return;
7226   case Intrinsic::experimental_patchpoint_void:
7227   case Intrinsic::experimental_patchpoint_i64:
7228     visitPatchpoint(I);
7229     return;
7230   case Intrinsic::experimental_gc_statepoint:
7231     LowerStatepoint(cast<GCStatepointInst>(I));
7232     return;
7233   case Intrinsic::experimental_gc_result:
7234     visitGCResult(cast<GCResultInst>(I));
7235     return;
7236   case Intrinsic::experimental_gc_relocate:
7237     visitGCRelocate(cast<GCRelocateInst>(I));
7238     return;
7239   case Intrinsic::instrprof_cover:
7240     llvm_unreachable("instrprof failed to lower a cover");
7241   case Intrinsic::instrprof_increment:
7242     llvm_unreachable("instrprof failed to lower an increment");
7243   case Intrinsic::instrprof_timestamp:
7244     llvm_unreachable("instrprof failed to lower a timestamp");
7245   case Intrinsic::instrprof_value_profile:
7246     llvm_unreachable("instrprof failed to lower a value profiling call");
7247   case Intrinsic::instrprof_mcdc_parameters:
7248     llvm_unreachable("instrprof failed to lower mcdc parameters");
7249   case Intrinsic::instrprof_mcdc_tvbitmap_update:
7250     llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update");
7251   case Intrinsic::instrprof_mcdc_condbitmap_update:
7252     llvm_unreachable("instrprof failed to lower an mcdc condbitmap update");
7253   case Intrinsic::localescape: {
7254     MachineFunction &MF = DAG.getMachineFunction();
7255     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
7256 
7257     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
7258     // is the same on all targets.
7259     for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) {
7260       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7261       if (isa<ConstantPointerNull>(Arg))
7262         continue; // Skip null pointers. They represent a hole in index space.
7263       AllocaInst *Slot = cast<AllocaInst>(Arg);
7264       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
7265              "can only escape static allocas");
7266       int FI = FuncInfo.StaticAllocaMap[Slot];
7267       MCSymbol *FrameAllocSym =
7268           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7269               GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
7270       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
7271               TII->get(TargetOpcode::LOCAL_ESCAPE))
7272           .addSym(FrameAllocSym)
7273           .addFrameIndex(FI);
7274     }
7275 
7276     return;
7277   }
7278 
7279   case Intrinsic::localrecover: {
7280     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
7281     MachineFunction &MF = DAG.getMachineFunction();
7282 
7283     // Get the symbol that defines the frame offset.
7284     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7285     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
7286     unsigned IdxVal =
7287         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7288     MCSymbol *FrameAllocSym =
7289         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7290             GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
7291 
7292     Value *FP = I.getArgOperand(1);
7293     SDValue FPVal = getValue(FP);
7294     EVT PtrVT = FPVal.getValueType();
7295 
7296     // Create an MCSymbol for the label to avoid any target lowering
7297     // that would make this PC relative.
7298     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
7299     SDValue OffsetVal =
7300         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
7301 
7302     // Add the offset to the FP.
7303     SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7304     setValue(&I, Add);
7305 
7306     return;
7307   }
7308 
7309   case Intrinsic::eh_exceptionpointer:
7310   case Intrinsic::eh_exceptioncode: {
7311     // Get the exception pointer vreg, copy from it, and resize it to fit.
7312     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
7313     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
7314     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
7315     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
7316     SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT);
7317     if (Intrinsic == Intrinsic::eh_exceptioncode)
7318       N = DAG.getZExtOrTrunc(N, sdl, MVT::i32);
7319     setValue(&I, N);
7320     return;
7321   }
7322   case Intrinsic::xray_customevent: {
7323     // Here we want to make sure that the intrinsic behaves as if it has a
7324     // specific calling convention.
7325     const auto &Triple = DAG.getTarget().getTargetTriple();
7326     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7327       return;
7328 
7329     SmallVector<SDValue, 8> Ops;
7330 
7331     // We want to say that we always want the arguments in registers.
7332     SDValue LogEntryVal = getValue(I.getArgOperand(0));
7333     SDValue StrSizeVal = getValue(I.getArgOperand(1));
7334     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7335     SDValue Chain = getRoot();
7336     Ops.push_back(LogEntryVal);
7337     Ops.push_back(StrSizeVal);
7338     Ops.push_back(Chain);
7339 
7340     // We need to enforce the calling convention for the callsite so that
7341     // argument ordering is enforced correctly, and so that register allocation
7342     // can see that some registers may be assumed clobbered and must be
7343     // preserved across calls to the intrinsic.
7344     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7345                                            sdl, NodeTys, Ops);
7346     SDValue patchableNode = SDValue(MN, 0);
7347     DAG.setRoot(patchableNode);
7348     setValue(&I, patchableNode);
7349     return;
7350   }
7351   case Intrinsic::xray_typedevent: {
7352     // Here we want to make sure that the intrinsic behaves as if it has a
7353     // specific calling convention.
7354     const auto &Triple = DAG.getTarget().getTargetTriple();
7355     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7356       return;
7357 
7358     SmallVector<SDValue, 8> Ops;
7359 
7360     // We want to say that we always want the arguments in registers.
7361     // It's unclear to me how manipulating the selection DAG here forces callers
7362     // to provide arguments in registers instead of on the stack.
7363     SDValue LogTypeId = getValue(I.getArgOperand(0));
7364     SDValue LogEntryVal = getValue(I.getArgOperand(1));
7365     SDValue StrSizeVal = getValue(I.getArgOperand(2));
7366     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7367     SDValue Chain = getRoot();
7368     Ops.push_back(LogTypeId);
7369     Ops.push_back(LogEntryVal);
7370     Ops.push_back(StrSizeVal);
7371     Ops.push_back(Chain);
7372 
7373     // We need to enforce the calling convention for the callsite so that
7374     // argument ordering is enforced correctly, and so that register allocation
7375     // can see that some registers may be assumed clobbered and must be
7376     // preserved across calls to the intrinsic.
7377     MachineSDNode *MN = DAG.getMachineNode(
7378         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7379     SDValue patchableNode = SDValue(MN, 0);
7380     DAG.setRoot(patchableNode);
7381     setValue(&I, patchableNode);
7382     return;
7383   }
7384   case Intrinsic::experimental_deoptimize:
7385     LowerDeoptimizeCall(&I);
7386     return;
7387   case Intrinsic::experimental_stepvector:
7388     visitStepVector(I);
7389     return;
7390   case Intrinsic::vector_reduce_fadd:
7391   case Intrinsic::vector_reduce_fmul:
7392   case Intrinsic::vector_reduce_add:
7393   case Intrinsic::vector_reduce_mul:
7394   case Intrinsic::vector_reduce_and:
7395   case Intrinsic::vector_reduce_or:
7396   case Intrinsic::vector_reduce_xor:
7397   case Intrinsic::vector_reduce_smax:
7398   case Intrinsic::vector_reduce_smin:
7399   case Intrinsic::vector_reduce_umax:
7400   case Intrinsic::vector_reduce_umin:
7401   case Intrinsic::vector_reduce_fmax:
7402   case Intrinsic::vector_reduce_fmin:
7403   case Intrinsic::vector_reduce_fmaximum:
7404   case Intrinsic::vector_reduce_fminimum:
7405     visitVectorReduce(I, Intrinsic);
7406     return;
7407 
7408   case Intrinsic::icall_branch_funnel: {
7409     SmallVector<SDValue, 16> Ops;
7410     Ops.push_back(getValue(I.getArgOperand(0)));
7411 
7412     int64_t Offset;
7413     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7414         I.getArgOperand(1), Offset, DAG.getDataLayout()));
7415     if (!Base)
7416       report_fatal_error(
7417           "llvm.icall.branch.funnel operand must be a GlobalValue");
7418     Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0));
7419 
7420     struct BranchFunnelTarget {
7421       int64_t Offset;
7422       SDValue Target;
7423     };
7424     SmallVector<BranchFunnelTarget, 8> Targets;
7425 
7426     for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) {
7427       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7428           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
7429       if (ElemBase != Base)
7430         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
7431                            "to the same GlobalValue");
7432 
7433       SDValue Val = getValue(I.getArgOperand(Op + 1));
7434       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7435       if (!GA)
7436         report_fatal_error(
7437             "llvm.icall.branch.funnel operand must be a GlobalValue");
7438       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
7439                                      GA->getGlobal(), sdl, Val.getValueType(),
7440                                      GA->getOffset())});
7441     }
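         // Sort the targets so they are emitted in increasing offset order.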
7442     llvm::sort(Targets,
7443                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
7444                  return T1.Offset < T2.Offset;
7445                });
7446 
7447     for (auto &T : Targets) {
7448       Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32));
7449       Ops.push_back(T.Target);
7450     }
7451 
7452     Ops.push_back(DAG.getRoot()); // Chain
7453     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7454                                  MVT::Other, Ops),
7455               0);
7456     DAG.setRoot(N);
7457     setValue(&I, N);
7458     HasTailCall = true;
7459     return;
7460   }
7461 
7462   case Intrinsic::wasm_landingpad_index:
7463     // The information this intrinsic contained has been transferred to
7464     // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
7465     // delete it now.
7466     return;
7467 
7468   case Intrinsic::aarch64_settag:
7469   case Intrinsic::aarch64_settag_zero: {
7470     const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7471     bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
7472     SDValue Val = TSI.EmitTargetCodeForSetTag(
7473         DAG, sdl, getRoot(), getValue(I.getArgOperand(0)),
7474         getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
7475         ZeroMemory);
7476     DAG.setRoot(Val);
7477     setValue(&I, Val);
7478     return;
7479   }
7480   case Intrinsic::amdgcn_cs_chain: {
7481     assert(I.arg_size() == 5 && "Additional args not supported yet");
7482     assert(cast<ConstantInt>(I.getOperand(4))->isZero() &&
7483            "Non-zero flags not supported yet");
7484 
7485     // At this point we don't care if it's amdgpu_cs_chain or
7486     // amdgpu_cs_chain_preserve.
7487     CallingConv::ID CC = CallingConv::AMDGPU_CS_Chain;
7488 
7489     Type *RetTy = I.getType();
7490     assert(RetTy->isVoidTy() && "Should not return");
7491 
7492     SDValue Callee = getValue(I.getOperand(0));
7493 
7494     // We only have 2 actual args: one for the SGPRs and one for the VGPRs.
7495     // We'll also tack the value of the EXEC mask at the end.
7496     TargetLowering::ArgListTy Args;
7497     Args.reserve(3);
7498 
7499     for (unsigned Idx : {2, 3, 1}) {
7500       TargetLowering::ArgListEntry Arg;
7501       Arg.Node = getValue(I.getOperand(Idx));
7502       Arg.Ty = I.getOperand(Idx)->getType();
7503       Arg.setAttributes(&I, Idx);
7504       Args.push_back(Arg);
7505     }
7506 
7507     assert(Args[0].IsInReg && "SGPR args should be marked inreg");
7508     assert(!Args[1].IsInReg && "VGPR args should not be marked inreg");
7509     Args[2].IsInReg = true; // EXEC should be inreg
7510 
7511     TargetLowering::CallLoweringInfo CLI(DAG);
7512     CLI.setDebugLoc(getCurSDLoc())
7513         .setChain(getRoot())
7514         .setCallee(CC, RetTy, Callee, std::move(Args))
7515         .setNoReturn(true)
7516         .setTailCall(true)
7517         .setConvergent(I.isConvergent());
7518     CLI.CB = &I;
7519     std::pair<SDValue, SDValue> Result =
7520         lowerInvokable(CLI, /*EHPadBB*/ nullptr);
7521     (void)Result;
7522     assert(!Result.first.getNode() && !Result.second.getNode() &&
7523            "Should've lowered as tail call");
7524 
7525     HasTailCall = true;
7526     return;
7527   }
7528   case Intrinsic::ptrmask: {
7529     SDValue Ptr = getValue(I.getOperand(0));
7530     SDValue Mask = getValue(I.getOperand(1));
7531 
7532     EVT PtrVT = Ptr.getValueType();
7533     assert(PtrVT == Mask.getValueType() &&
7534            "Pointers with different index type are not supported by SDAG");
7535     setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr, Mask));
7536     return;
7537   }
7538   case Intrinsic::threadlocal_address: {
7539     setValue(&I, getValue(I.getOperand(0)));
7540     return;
7541   }
7542   case Intrinsic::get_active_lane_mask: {
7543     EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7544     SDValue Index = getValue(I.getOperand(0));
7545     EVT ElementVT = Index.getValueType();
7546 
7547     if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) {
7548       visitTargetIntrinsic(I, Intrinsic);
7549       return;
7550     }
7551 
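         // Expand to a vector compare: (splat(Index) + stepvector) u< splat(TripCount),
         // using a saturating add so the induction cannot wrap around.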
7552     SDValue TripCount = getValue(I.getOperand(1));
7553     EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT,
7554                                  CCVT.getVectorElementCount());
7555 
7556     SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index);
7557     SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount);
7558     SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
7559     SDValue VectorInduction = DAG.getNode(
7560         ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep);
7561     SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction,
7562                                  VectorTripCount, ISD::CondCode::SETULT);
7563     setValue(&I, SetCC);
7564     return;
7565   }
7566   case Intrinsic::experimental_get_vector_length: {
7567     assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
7568            "Expected positive VF");
7569     unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
7570     bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
7571 
7572     SDValue Count = getValue(I.getOperand(0));
7573     EVT CountVT = Count.getValueType();
7574 
7575     if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) {
7576       visitTargetIntrinsic(I, Intrinsic);
7577       return;
7578     }
7579 
7580     // Expand to a umin between the trip count and the maximum number of
7581     // elements the type can hold.
7582     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7583 
7584     // Extend the trip count to at least the result VT.
7585     if (CountVT.bitsLT(VT)) {
7586       Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count);
7587       CountVT = VT;
7588     }
7589 
7590     SDValue MaxEVL = DAG.getElementCount(sdl, CountVT,
7591                                          ElementCount::get(VF, IsScalable));
7592 
7593     SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL);
7594     // Clip to the result type if needed.
7595     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin);
7596 
7597     setValue(&I, Trunc);
7598     return;
7599   }
7600   case Intrinsic::experimental_cttz_elts: {
7601     auto DL = getCurSDLoc();
7602     SDValue Op = getValue(I.getOperand(0));
7603     EVT OpVT = Op.getValueType();
7604 
7605     if (!TLI.shouldExpandCttzElements(OpVT)) {
7606       visitTargetIntrinsic(I, Intrinsic);
7607       return;
7608     }
7609 
7610     if (OpVT.getScalarType() != MVT::i1) {
7611       // Compare the input vector elements to zero and use that to count trailing zeros
7612       SDValue AllZero = DAG.getConstant(0, DL, OpVT);
7613       OpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
7614                               OpVT.getVectorElementCount());
7615       Op = DAG.getSetCC(DL, OpVT, Op, AllZero, ISD::SETNE);
7616     }
7617 
7618     // Find the smallest "sensible" element type to use for the expansion.
7619     ConstantRange CR(
7620         APInt(64, OpVT.getVectorElementCount().getKnownMinValue()));
7621     if (OpVT.isScalableVT())
7622       CR = CR.umul_sat(getVScaleRange(I.getCaller(), 64));
7623 
7624     // If the zero-is-poison flag is set, we can assume the upper limit
7625     // of the result is VF-1.
7626     if (!cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero())
7627       CR = CR.subtract(APInt(64, 1));
7628 
7629     unsigned EltWidth = I.getType()->getScalarSizeInBits();
7630     EltWidth = std::min(EltWidth, (unsigned)CR.getActiveBits());
7631     EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);
7632 
7633     MVT NewEltTy = MVT::getIntegerVT(EltWidth);
7634 
7635     // Create the new vector type & get the vector length
7636     EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltTy,
7637                                  OpVT.getVectorElementCount());
7638 
7639     SDValue VL =
7640         DAG.getElementCount(DL, NewEltTy, OpVT.getVectorElementCount());
7641 
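         // Lane i of ((splat(VL) - step) & sext(mask)) is VL - i when lane i of the
         // input is set and 0 otherwise, so the unsigned max over all lanes is VL
         // minus the index of the first set element (or 0 if none are set). VL minus
         // that max is then the number of elements before the first set one, or VL
         // when the input is all-false.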
7642     SDValue StepVec = DAG.getStepVector(DL, NewVT);
7643     SDValue SplatVL = DAG.getSplat(NewVT, DL, VL);
7644     SDValue StepVL = DAG.getNode(ISD::SUB, DL, NewVT, SplatVL, StepVec);
7645     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, Op);
7646     SDValue And = DAG.getNode(ISD::AND, DL, NewVT, StepVL, Ext);
7647     SDValue Max = DAG.getNode(ISD::VECREDUCE_UMAX, DL, NewEltTy, And);
7648     SDValue Sub = DAG.getNode(ISD::SUB, DL, NewEltTy, VL, Max);
7649 
7650     EVT RetTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7651     SDValue Ret = DAG.getZExtOrTrunc(Sub, DL, RetTy);
7652 
7653     setValue(&I, Ret);
7654     return;
7655   }
7656   case Intrinsic::vector_insert: {
7657     SDValue Vec = getValue(I.getOperand(0));
7658     SDValue SubVec = getValue(I.getOperand(1));
7659     SDValue Index = getValue(I.getOperand(2));
7660 
7661     // The intrinsic's index type is i64, but the SDNode requires an index type
7662     // suitable for the target. Convert the index as required.
7663     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7664     if (Index.getValueType() != VectorIdxTy)
7665       Index = DAG.getVectorIdxConstant(
7666           cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
7667 
7668     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7669     setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
7670                              Index));
7671     return;
7672   }
7673   case Intrinsic::vector_extract: {
7674     SDValue Vec = getValue(I.getOperand(0));
7675     SDValue Index = getValue(I.getOperand(1));
7676     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7677 
7678     // The intrinsic's index type is i64, but the SDNode requires an index type
7679     // suitable for the target. Convert the index as required.
7680     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7681     if (Index.getValueType() != VectorIdxTy)
7682       Index = DAG.getVectorIdxConstant(
7683           cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
7684 
7685     setValue(&I,
7686              DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
7687     return;
7688   }
7689   case Intrinsic::experimental_vector_reverse:
7690     visitVectorReverse(I);
7691     return;
7692   case Intrinsic::experimental_vector_splice:
7693     visitVectorSplice(I);
7694     return;
7695   case Intrinsic::callbr_landingpad:
7696     visitCallBrLandingPad(I);
7697     return;
7698   case Intrinsic::experimental_vector_interleave2:
7699     visitVectorInterleave(I);
7700     return;
7701   case Intrinsic::experimental_vector_deinterleave2:
7702     visitVectorDeinterleave(I);
7703     return;
7704   }
7705 }
7706 
7707 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
7708     const ConstrainedFPIntrinsic &FPI) {
7709   SDLoc sdl = getCurSDLoc();
7710 
7711   // We do not need to serialize constrained FP intrinsics against
7712   // each other or against (nonvolatile) loads, so they can be
7713   // chained like loads.
7714   SDValue Chain = DAG.getRoot();
7715   SmallVector<SDValue, 4> Opers;
7716   Opers.push_back(Chain);
7717   if (FPI.isUnaryOp()) {
7718     Opers.push_back(getValue(FPI.getArgOperand(0)));
7719   } else if (FPI.isTernaryOp()) {
7720     Opers.push_back(getValue(FPI.getArgOperand(0)));
7721     Opers.push_back(getValue(FPI.getArgOperand(1)));
7722     Opers.push_back(getValue(FPI.getArgOperand(2)));
7723   } else {
7724     Opers.push_back(getValue(FPI.getArgOperand(0)));
7725     Opers.push_back(getValue(FPI.getArgOperand(1)));
7726   }
7727 
7728   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
7729     assert(Result.getNode()->getNumValues() == 2);
7730 
7731     // Push node to the appropriate list so that future instructions can be
7732     // chained up correctly.
7733     SDValue OutChain = Result.getValue(1);
7734     switch (EB) {
7735     case fp::ExceptionBehavior::ebIgnore:
7736       // The only reason why ebIgnore nodes still need to be chained is that
7737       // they might depend on the current rounding mode, and therefore must
7738       // not be moved across instructions that may change that mode.
7739       [[fallthrough]];
7740     case fp::ExceptionBehavior::ebMayTrap:
7741       // These must not be moved across calls or instructions that may change
7742       // floating-point exception masks.
7743       PendingConstrainedFP.push_back(OutChain);
7744       break;
7745     case fp::ExceptionBehavior::ebStrict:
7746       // These must not be moved across calls or instructions that may change
7747       // floating-point exception masks or read floating-point exception flags.
7748       // In addition, they cannot be optimized out even if unused.
7749       PendingConstrainedFPStrict.push_back(OutChain);
7750       break;
7751     }
7752   };
7753 
7754   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7755   EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
7756   SDVTList VTs = DAG.getVTList(VT, MVT::Other);
7757   fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
7758 
7759   SDNodeFlags Flags;
7760   if (EB == fp::ExceptionBehavior::ebIgnore)
7761     Flags.setNoFPExcept(true);
7762 
7763   if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
7764     Flags.copyFMF(*FPOp);
7765 
7766   unsigned Opcode;
7767   switch (FPI.getIntrinsicID()) {
7768   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7769 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
7770   case Intrinsic::INTRINSIC:                                                   \
7771     Opcode = ISD::STRICT_##DAGN;                                               \
7772     break;
7773 #include "llvm/IR/ConstrainedOps.def"
7774   case Intrinsic::experimental_constrained_fmuladd: {
7775     Opcode = ISD::STRICT_FMA;
7776     // Break fmuladd into fmul and fadd.
7777     if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
7778         !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
7779       Opers.pop_back();
7780       SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
7781       pushOutChain(Mul, EB);
7782       Opcode = ISD::STRICT_FADD;
7783       Opers.clear();
7784       Opers.push_back(Mul.getValue(1));
7785       Opers.push_back(Mul.getValue(0));
7786       Opers.push_back(getValue(FPI.getArgOperand(2)));
7787     }
7788     break;
7789   }
7790   }
7791 
7792   // A few strict DAG nodes carry additional operands that are not
7793   // set up by the default code above.
7794   switch (Opcode) {
7795   default: break;
7796   case ISD::STRICT_FP_ROUND:
7797     Opers.push_back(
7798         DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
7799     break;
7800   case ISD::STRICT_FSETCC:
7801   case ISD::STRICT_FSETCCS: {
7802     auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
7803     ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
7804     if (TM.Options.NoNaNsFPMath)
7805       Condition = getFCmpCodeWithoutNaN(Condition);
7806     Opers.push_back(DAG.getCondCode(Condition));
7807     break;
7808   }
7809   }
7810 
7811   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
7812   pushOutChain(Result, EB);
7813 
7814   SDValue FPResult = Result.getValue(0);
7815   setValue(&FPI, FPResult);
7816 }
7817 
7818 static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
7819   std::optional<unsigned> ResOPC;
7820   switch (VPIntrin.getIntrinsicID()) {
7821   case Intrinsic::vp_ctlz: {
7822     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7823     ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
7824     break;
7825   }
7826   case Intrinsic::vp_cttz: {
7827     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7828     ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
7829     break;
7830   }
7831 #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)                                    \
7832   case Intrinsic::VPID:                                                        \
7833     ResOPC = ISD::VPSD;                                                        \
7834     break;
7835 #include "llvm/IR/VPIntrinsics.def"
7836   }
7837 
7838   if (!ResOPC)
7839     llvm_unreachable(
7840         "Inconsistency: no SDNode available for this VPIntrinsic!");
7841 
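       // When reassociation is allowed, the ordered (sequential) FP reductions can
       // be lowered as their cheaper unordered counterparts.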
7842   if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
7843       *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
7844     if (VPIntrin.getFastMathFlags().allowReassoc())
7845       return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
7846                                                 : ISD::VP_REDUCE_FMUL;
7847   }
7848 
7849   return *ResOPC;
7850 }
7851 
7852 void SelectionDAGBuilder::visitVPLoad(
7853     const VPIntrinsic &VPIntrin, EVT VT,
7854     const SmallVectorImpl<SDValue> &OpValues) {
7855   SDLoc DL = getCurSDLoc();
7856   Value *PtrOperand = VPIntrin.getArgOperand(0);
7857   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7858   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7859   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7860   SDValue LD;
7861   // Do not serialize variable-length loads of constant memory with
7862   // anything.
7863   if (!Alignment)
7864     Alignment = DAG.getEVTAlign(VT);
7865   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
7866   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
7867   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
7868   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7869       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
7870       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7871   LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
7872                      MMO, false /*IsExpanding */);
7873   if (AddToChain)
7874     PendingLoads.push_back(LD.getValue(1));
7875   setValue(&VPIntrin, LD);
7876 }
7877 
7878 void SelectionDAGBuilder::visitVPGather(
7879     const VPIntrinsic &VPIntrin, EVT VT,
7880     const SmallVectorImpl<SDValue> &OpValues) {
7881   SDLoc DL = getCurSDLoc();
7882   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7883   Value *PtrOperand = VPIntrin.getArgOperand(0);
7884   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7885   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7886   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7887   SDValue LD;
7888   if (!Alignment)
7889     Alignment = DAG.getEVTAlign(VT.getScalarType());
7890   unsigned AS =
7891     PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7892   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7893      MachinePointerInfo(AS), MachineMemOperand::MOLoad,
7894      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7895   SDValue Base, Index, Scale;
7896   ISD::MemIndexType IndexType;
7897   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7898                                     this, VPIntrin.getParent(),
7899                                     VT.getScalarStoreSize());
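       // If no common scalar base could be split out, gather straight through the
       // vector of pointers: zero base, the pointers as the index, unit scale.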
7900   if (!UniformBase) {
7901     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7902     Index = getValue(PtrOperand);
7903     IndexType = ISD::SIGNED_SCALED;
7904     Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7905   }
7906   EVT IdxVT = Index.getValueType();
7907   EVT EltTy = IdxVT.getVectorElementType();
7908   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7909     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7910     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7911   }
7912   LD = DAG.getGatherVP(
7913       DAG.getVTList(VT, MVT::Other), VT, DL,
7914       {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
7915       IndexType);
7916   PendingLoads.push_back(LD.getValue(1));
7917   setValue(&VPIntrin, LD);
7918 }
7919 
7920 void SelectionDAGBuilder::visitVPStore(
7921     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7922   SDLoc DL = getCurSDLoc();
7923   Value *PtrOperand = VPIntrin.getArgOperand(1);
7924   EVT VT = OpValues[0].getValueType();
7925   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7926   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7927   SDValue ST;
7928   if (!Alignment)
7929     Alignment = DAG.getEVTAlign(VT);
7930   SDValue Ptr = OpValues[1];
7931   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
7932   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7933       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
7934       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7935   ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
7936                       OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
7937                       /* IsTruncating */ false, /*IsCompressing*/ false);
7938   DAG.setRoot(ST);
7939   setValue(&VPIntrin, ST);
7940 }
7941 
7942 void SelectionDAGBuilder::visitVPScatter(
7943     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7944   SDLoc DL = getCurSDLoc();
7945   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7946   Value *PtrOperand = VPIntrin.getArgOperand(1);
7947   EVT VT = OpValues[0].getValueType();
7948   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7949   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7950   SDValue ST;
7951   if (!Alignment)
7952     Alignment = DAG.getEVTAlign(VT.getScalarType());
7953   unsigned AS =
7954       PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7955   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7956       MachinePointerInfo(AS), MachineMemOperand::MOStore,
7957       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7958   SDValue Base, Index, Scale;
7959   ISD::MemIndexType IndexType;
7960   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7961                                     this, VPIntrin.getParent(),
7962                                     VT.getScalarStoreSize());
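       // As for gathers: with no common scalar base, scatter through the vector of
       // pointers using a zero base, the pointers as the index, and unit scale.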
7963   if (!UniformBase) {
7964     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7965     Index = getValue(PtrOperand);
7966     IndexType = ISD::SIGNED_SCALED;
7967     Scale =
7968       DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7969   }
7970   EVT IdxVT = Index.getValueType();
7971   EVT EltTy = IdxVT.getVectorElementType();
7972   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7973     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7974     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7975   }
7976   ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
7977                         {getMemoryRoot(), OpValues[0], Base, Index, Scale,
7978                          OpValues[2], OpValues[3]},
7979                         MMO, IndexType);
7980   DAG.setRoot(ST);
7981   setValue(&VPIntrin, ST);
7982 }
7983 
7984 void SelectionDAGBuilder::visitVPStridedLoad(
7985     const VPIntrinsic &VPIntrin, EVT VT,
7986     const SmallVectorImpl<SDValue> &OpValues) {
7987   SDLoc DL = getCurSDLoc();
7988   Value *PtrOperand = VPIntrin.getArgOperand(0);
7989   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7990   if (!Alignment)
7991     Alignment = DAG.getEVTAlign(VT.getScalarType());
7992   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7993   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7994   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
7995   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
7996   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
7997   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7998       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
7999       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
8000 
8001   SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
8002                                     OpValues[2], OpValues[3], MMO,
8003                                     false /*IsExpanding*/);
8004 
8005   if (AddToChain)
8006     PendingLoads.push_back(LD.getValue(1));
8007   setValue(&VPIntrin, LD);
8008 }
8009 
8010 void SelectionDAGBuilder::visitVPStridedStore(
8011     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8012   SDLoc DL = getCurSDLoc();
8013   Value *PtrOperand = VPIntrin.getArgOperand(1);
8014   EVT VT = OpValues[0].getValueType();
8015   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8016   if (!Alignment)
8017     Alignment = DAG.getEVTAlign(VT.getScalarType());
8018   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8019   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8020       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
8021       MemoryLocation::UnknownSize, *Alignment, AAInfo);
8022 
8023   SDValue ST = DAG.getStridedStoreVP(
8024       getMemoryRoot(), DL, OpValues[0], OpValues[1],
8025       DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8026       OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false,
8027       /*IsCompressing*/ false);
8028 
8029   DAG.setRoot(ST);
8030   setValue(&VPIntrin, ST);
8031 }
8032 
8033 void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
8034   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8035   SDLoc DL = getCurSDLoc();
8036 
8037   ISD::CondCode Condition;
8038   CmpInst::Predicate CondCode = VPIntrin.getPredicate();
8039   bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
8040   if (IsFP) {
8041     // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan)
8042     // flags, but calls that don't return floating-point types can't be
8043     // FPMathOperators, like vp.fcmp. This affects constrained fcmp too.
8044     Condition = getFCmpCondCode(CondCode);
8045     if (TM.Options.NoNaNsFPMath)
8046       Condition = getFCmpCodeWithoutNaN(Condition);
8047   } else {
8048     Condition = getICmpCondCode(CondCode);
8049   }
8050 
8051   SDValue Op1 = getValue(VPIntrin.getOperand(0));
8052   SDValue Op2 = getValue(VPIntrin.getOperand(1));
8053   // #2 is the condition code
8054   SDValue MaskOp = getValue(VPIntrin.getOperand(3));
8055   SDValue EVL = getValue(VPIntrin.getOperand(4));
8056   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8057   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8058          "Unexpected target EVL type");
8059   EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
8060 
8061   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8062                                                         VPIntrin.getType());
8063   setValue(&VPIntrin,
8064            DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8065 }
8066 
8067 void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8068     const VPIntrinsic &VPIntrin) {
8069   SDLoc DL = getCurSDLoc();
8070   unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
8071 
8072   auto IID = VPIntrin.getIntrinsicID();
8073 
8074   if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8075     return visitVPCmp(*CmpI);
8076 
8077   SmallVector<EVT, 4> ValueVTs;
8078   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8079   ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
8080   SDVTList VTs = DAG.getVTList(ValueVTs);
8081 
8082   auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);
8083 
8084   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8085   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8086          "Unexpected target EVL type");
8087 
8088   // Request operands.
8089   SmallVector<SDValue, 7> OpValues;
8090   for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) {
8091     auto Op = getValue(VPIntrin.getArgOperand(I));
8092     if (I == EVLParamPos)
8093       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
8094     OpValues.push_back(Op);
8095   }
8096 
8097   switch (Opcode) {
8098   default: {
8099     SDNodeFlags SDFlags;
8100     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8101       SDFlags.copyFMF(*FPMO);
8102     SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags);
8103     setValue(&VPIntrin, Result);
8104     break;
8105   }
8106   case ISD::VP_LOAD:
8107     visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8108     break;
8109   case ISD::VP_GATHER:
8110     visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8111     break;
8112   case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8113     visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8114     break;
8115   case ISD::VP_STORE:
8116     visitVPStore(VPIntrin, OpValues);
8117     break;
8118   case ISD::VP_SCATTER:
8119     visitVPScatter(VPIntrin, OpValues);
8120     break;
8121   case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8122     visitVPStridedStore(VPIntrin, OpValues);
8123     break;
8124   case ISD::VP_FMULADD: {
8125     assert(OpValues.size() == 5 && "Unexpected number of operands");
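         // Use a single VP_FMA when FP-op fusion is permitted and FMA is profitable
         // on the target; otherwise split into VP_FMUL followed by VP_FADD that
         // share the same mask and EVL operands.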
8126     SDNodeFlags SDFlags;
8127     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8128       SDFlags.copyFMF(*FPMO);
8129     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
8130         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) {
8131       setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags));
8132     } else {
8133       SDValue Mul = DAG.getNode(
8134           ISD::VP_FMUL, DL, VTs,
8135           {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8136       SDValue Add =
8137           DAG.getNode(ISD::VP_FADD, DL, VTs,
8138                       {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8139       setValue(&VPIntrin, Add);
8140     }
8141     break;
8142   }
8143   case ISD::VP_IS_FPCLASS: {
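         // The FP-class test mask must be an immediate operand, so rebuild it as a
         // target constant.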
8144     const DataLayout DLayout = DAG.getDataLayout();
8145     EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
8146     auto Constant = cast<ConstantSDNode>(OpValues[1])->getZExtValue();
8147     SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
8148     SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
8149                             {OpValues[0], Check, OpValues[2], OpValues[3]});
8150     setValue(&VPIntrin, V);
8151     return;
8152   }
8153   case ISD::VP_INTTOPTR: {
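         // Convert the integer to a pointer under the mask and EVL, accounting for
         // targets where the in-memory pointer type differs from the pointer value
         // type.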
8154     SDValue N = OpValues[0];
8155     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
8156     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
8157     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8158                                OpValues[2]);
8159     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8160                              OpValues[2]);
8161     setValue(&VPIntrin, N);
8162     break;
8163   }
8164   case ISD::VP_PTRTOINT: {
8165     SDValue N = OpValues[0];
8166     EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8167                                                           VPIntrin.getType());
8168     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
8169                                        VPIntrin.getOperand(0)->getType());
8170     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8171                                OpValues[2]);
8172     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8173                              OpValues[2]);
8174     setValue(&VPIntrin, N);
8175     break;
8176   }
8177   case ISD::VP_ABS:
8178   case ISD::VP_CTLZ:
8179   case ISD::VP_CTLZ_ZERO_UNDEF:
8180   case ISD::VP_CTTZ:
8181   case ISD::VP_CTTZ_ZERO_UNDEF: {
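         // The immediate flag operand (operand 1) is dropped here; for ctlz/cttz it
         // was already folded into the opcode in getISDForVPIntrinsic. Only the
         // source value, the mask and the EVL are passed to the node.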
8182     SDValue Result =
8183         DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8184     setValue(&VPIntrin, Result);
8185     break;
8186   }
8187   }
8188 }
8189 
8190 SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
8191                                           const BasicBlock *EHPadBB,
8192                                           MCSymbol *&BeginLabel) {
8193   MachineFunction &MF = DAG.getMachineFunction();
8194   MachineModuleInfo &MMI = MF.getMMI();
8195 
8196   // Insert a label before the invoke call to mark the try range.  This can be
8197   // used to detect deletion of the invoke via the MachineModuleInfo.
8198   BeginLabel = MMI.getContext().createTempSymbol();
8199 
8200   // For SjLj, keep track of which landing pads go with which invokes
8201   // so as to maintain the ordering of pads in the LSDA.
8202   unsigned CallSiteIndex = MMI.getCurrentCallSite();
8203   if (CallSiteIndex) {
8204     MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
8205     LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
8206 
8207     // Now that the call site is handled, stop tracking it.
8208     MMI.setCurrentCallSite(0);
8209   }
8210 
8211   return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
8212 }
8213 
8214 SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
8215                                         const BasicBlock *EHPadBB,
8216                                         MCSymbol *BeginLabel) {
8217   assert(BeginLabel && "BeginLabel should've been set");
8218 
8219   MachineFunction &MF = DAG.getMachineFunction();
8220   MachineModuleInfo &MMI = MF.getMMI();
8221 
8222   // Insert a label at the end of the invoke call to mark the try range.  This
8223   // can be used to detect deletion of the invoke via the MachineModuleInfo.
8224   MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
8225   Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
8226 
8227   // Inform MachineModuleInfo of range.
8228   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
8229   // There is a platform (e.g. wasm) that uses funclet style IR but does not
8230   // actually use outlined funclets and their LSDA info style.
8231   if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
8232     assert(II && "II should've been set");
8233     WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
8234     EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
8235   } else if (!isScopedEHPersonality(Pers)) {
8236     assert(EHPadBB);
8237     MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
8238   }
8239 
8240   return Chain;
8241 }
8242 
8243 std::pair<SDValue, SDValue>
8244 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
8245                                     const BasicBlock *EHPadBB) {
8246   MCSymbol *BeginLabel = nullptr;
8247 
8248   if (EHPadBB) {
8249     // Both PendingLoads and PendingExports must be flushed here;
8250     // this call might not return.
8251     (void)getRoot();
8252     DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
8253     CLI.setChain(getRoot());
8254   }
8255 
8256   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8257   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
8258 
8259   assert((CLI.IsTailCall || Result.second.getNode()) &&
8260          "Non-null chain expected with non-tail call!");
8261   assert((Result.second.getNode() || !Result.first.getNode()) &&
8262          "Null value expected with tail call!");
8263 
8264   if (!Result.second.getNode()) {
8265     // As a special case, a null chain means that a tail call has been emitted
8266     // and the DAG root is already updated.
8267     HasTailCall = true;
8268 
8269     // Since there's no actual continuation from this block, nothing can be
8270     // relying on us setting vregs for them.
8271     PendingExports.clear();
8272   } else {
8273     DAG.setRoot(Result.second);
8274   }
8275 
8276   if (EHPadBB) {
8277     DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
8278                            BeginLabel));
8279   }
8280 
8281   return Result;
8282 }
8283 
8284 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
8285                                       bool isTailCall,
8286                                       bool isMustTailCall,
8287                                       const BasicBlock *EHPadBB) {
8288   auto &DL = DAG.getDataLayout();
8289   FunctionType *FTy = CB.getFunctionType();
8290   Type *RetTy = CB.getType();
8291 
8292   TargetLowering::ArgListTy Args;
8293   Args.reserve(CB.arg_size());
8294 
8295   const Value *SwiftErrorVal = nullptr;
8296   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8297 
8298   if (isTailCall) {
8299     // Avoid emitting tail calls in functions with the disable-tail-calls
8300     // attribute.
8301     auto *Caller = CB.getParent()->getParent();
8302     if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
8303         "true" && !isMustTailCall)
8304       isTailCall = false;
8305 
8306     // We can't tail call inside a function with a swifterror argument. Lowering
8307     // does not support this yet. It would have to move into the swifterror
8308     // register before the call.
8309     if (TLI.supportSwiftError() &&
8310         Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8311       isTailCall = false;
8312   }
8313 
8314   for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
8315     TargetLowering::ArgListEntry Entry;
8316     const Value *V = *I;
8317 
8318     // Skip empty types
8319     if (V->getType()->isEmptyTy())
8320       continue;
8321 
8322     SDValue ArgNode = getValue(V);
8323     Entry.Node = ArgNode; Entry.Ty = V->getType();
8324 
8325     Entry.setAttributes(&CB, I - CB.arg_begin());
8326 
8327     // Use swifterror virtual register as input to the call.
8328     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
8329       SwiftErrorVal = V;
8330       // We find the virtual register for the actual swifterror argument.
8331       // Instead of using the Value, we use the virtual register.
8332       Entry.Node =
8333           DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
8334                           EVT(TLI.getPointerTy(DL)));
8335     }
8336 
8337     Args.push_back(Entry);
8338 
8339     // If we have an explicit sret argument that is an Instruction (i.e., it
8340     // might point to function-local memory), we can't meaningfully tail-call.
8341     if (Entry.IsSRet && isa<Instruction>(V))
8342       isTailCall = false;
8343   }
8344 
8345   // If call site has a cfguardtarget operand bundle, create and add an
8346   // additional ArgListEntry.
8347   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
8348     TargetLowering::ArgListEntry Entry;
8349     Value *V = Bundle->Inputs[0];
8350     SDValue ArgNode = getValue(V);
8351     Entry.Node = ArgNode;
8352     Entry.Ty = V->getType();
8353     Entry.IsCFGuardTarget = true;
8354     Args.push_back(Entry);
8355   }
8356 
8357   // Check if target-independent constraints permit a tail call here.
8358   // Target-dependent constraints are checked within TLI->LowerCallTo.
8359   if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
8360     isTailCall = false;
8361 
8362     // Disable tail calls if there is a swifterror argument. Targets have not
8363     // been updated to support tail calls.
8364   if (TLI.supportSwiftError() && SwiftErrorVal)
8365     isTailCall = false;
8366 
8367   ConstantInt *CFIType = nullptr;
8368   if (CB.isIndirectCall()) {
8369     if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) {
8370       if (!TLI.supportKCFIBundles())
8371         report_fatal_error(
8372             "Target doesn't support calls with kcfi operand bundles.");
8373       CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8374       assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
8375     }
8376   }
8377 
8378   TargetLowering::CallLoweringInfo CLI(DAG);
8379   CLI.setDebugLoc(getCurSDLoc())
8380       .setChain(getRoot())
8381       .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
8382       .setTailCall(isTailCall)
8383       .setConvergent(CB.isConvergent())
8384       .setIsPreallocated(
8385           CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
8386       .setCFIType(CFIType);
8387   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8388 
8389   if (Result.first.getNode()) {
8390     Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
8391     setValue(&CB, Result.first);
8392   }
8393 
8394   // The last element of CLI.InVals has the SDValue for swifterror return.
8395   // Here we copy it to a virtual register and update SwiftErrorMap for
8396   // book-keeping.
8397   if (SwiftErrorVal && TLI.supportSwiftError()) {
8398     // Get the last element of InVals.
8399     SDValue Src = CLI.InVals.back();
8400     Register VReg =
8401         SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
8402     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
8403     DAG.setRoot(CopyNode);
8404   }
8405 }
8406 
8407 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
8408                              SelectionDAGBuilder &Builder) {
8409   // Check to see if this load can be trivially constant folded, e.g. if the
8410   // input is from a string literal.
8411   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
8412     // Cast pointer to the type we really want to load.
8413     Type *LoadTy =
8414         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
8415     if (LoadVT.isVector())
8416       LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
8417 
8418     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
8419                                          PointerType::getUnqual(LoadTy));
8420 
8421     if (const Constant *LoadCst =
8422             ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
8423                                          LoadTy, Builder.DAG.getDataLayout()))
8424       return Builder.getValue(LoadCst);
8425   }
8426 
8427   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
8428   // still constant memory, the input chain can be the entry node.
8429   SDValue Root;
8430   bool ConstantMemory = false;
8431 
8432   // Do not serialize (non-volatile) loads of constant memory with anything.
8433   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
8434     Root = Builder.DAG.getEntryNode();
8435     ConstantMemory = true;
8436   } else {
8437     // Do not serialize non-volatile loads against each other.
8438     Root = Builder.DAG.getRoot();
8439   }
8440 
8441   SDValue Ptr = Builder.getValue(PtrVal);
8442   SDValue LoadVal =
8443       Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
8444                           MachinePointerInfo(PtrVal), Align(1));
8445 
8446   if (!ConstantMemory)
8447     Builder.PendingLoads.push_back(LoadVal.getValue(1));
8448   return LoadVal;
8449 }
8450 
8451 /// Record the value for an instruction that produces an integer result,
8452 /// converting the type where necessary.
8453 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
8454                                                   SDValue Value,
8455                                                   bool IsSigned) {
8456   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8457                                                     I.getType(), true);
8458   Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT);
8459   setValue(&I, Value);
8460 }
8461 
8462 /// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
8463 /// true and lower it. Otherwise return false, and it will be lowered like a
8464 /// normal call.
8465 /// The caller already checked that \p I calls the appropriate LibFunc with a
8466 /// correct prototype.
8467 bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
8468   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
8469   const Value *Size = I.getArgOperand(2);
8470   const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size));
8471   if (CSize && CSize->getZExtValue() == 0) {
8472     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8473                                                           I.getType(), true);
8474     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
8475     return true;
8476   }
8477 
8478   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8479   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
8480       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
8481       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
8482   if (Res.first.getNode()) {
8483     processIntegerCallValue(I, Res.first, true);
8484     PendingLoads.push_back(Res.second);
8485     return true;
8486   }
8487 
8488   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
8489   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
8490   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
8491     return false;
8492 
8493   // If the target has a fast compare for the given size, it will return a
8494   // preferred load type for that size. Require that the load VT is legal and
8495   // that the target supports unaligned loads of that type. Otherwise, return
8496   // INVALID.
8497   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
8498     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8499     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
8500     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
8501       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
8502       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
8503       // TODO: Check alignment of src and dest ptrs.
8504       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
8505       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
8506       if (!TLI.isTypeLegal(LVT) ||
8507           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
8508           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
8509         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
8510     }
8511 
8512     return LVT;
8513   };
8514 
8515   // This turns into unaligned loads. We only do this if the target natively
8516   // supports the MVT we'll be loading or if it is small enough (<= 4) that
8517   // we'll only produce a small number of byte loads.
8518   MVT LoadVT;
8519   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
8520   switch (NumBitsToCompare) {
8521   default:
8522     return false;
8523   case 16:
8524     LoadVT = MVT::i16;
8525     break;
8526   case 32:
8527     LoadVT = MVT::i32;
8528     break;
8529   case 64:
8530   case 128:
8531   case 256:
8532     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
8533     break;
8534   }
8535 
8536   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
8537     return false;
8538 
8539   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
8540   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
8541 
8542   // Bitcast to a wide integer type if the loads are vectors.
8543   if (LoadVT.isVector()) {
8544     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
8545     LoadL = DAG.getBitcast(CmpVT, LoadL);
8546     LoadR = DAG.getBitcast(CmpVT, LoadR);
8547   }
8548 
8549   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
8550   processIntegerCallValue(I, Cmp, false);
8551   return true;
8552 }
8553 
8554 /// See if we can lower a memchr call into an optimized form. If so, return
8555 /// true and lower it. Otherwise return false, and it will be lowered like a
8556 /// normal call.
8557 /// The caller already checked that \p I calls the appropriate LibFunc with a
8558 /// correct prototype.
8559 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
8560   const Value *Src = I.getArgOperand(0);
8561   const Value *Char = I.getArgOperand(1);
8562   const Value *Length = I.getArgOperand(2);
8563 
8564   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8565   std::pair<SDValue, SDValue> Res =
8566     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
8567                                 getValue(Src), getValue(Char), getValue(Length),
8568                                 MachinePointerInfo(Src));
8569   if (Res.first.getNode()) {
8570     setValue(&I, Res.first);
8571     PendingLoads.push_back(Res.second);
8572     return true;
8573   }
8574 
8575   return false;
8576 }
8577 
8578 /// See if we can lower a mempcpy call into an optimized form. If so, return
8579 /// true and lower it. Otherwise return false, and it will be lowered like a
8580 /// normal call.
8581 /// The caller already checked that \p I calls the appropriate LibFunc with a
8582 /// correct prototype.
8583 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
8584   SDValue Dst = getValue(I.getArgOperand(0));
8585   SDValue Src = getValue(I.getArgOperand(1));
8586   SDValue Size = getValue(I.getArgOperand(2));
8587 
8588   Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
8589   Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
8590   // DAG::getMemcpy needs Alignment to be defined.
8591   Align Alignment = std::min(DstAlign, SrcAlign);
8592 
8593   SDLoc sdl = getCurSDLoc();
8594 
8595   // In the mempcpy context we need to pass in a false value for isTailCall
8596   // because the return pointer needs to be adjusted by the size of
8597   // the copied memory.
8598   SDValue Root = getMemoryRoot();
8599   SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, false, false,
8600                              /*isTailCall=*/false,
8601                              MachinePointerInfo(I.getArgOperand(0)),
8602                              MachinePointerInfo(I.getArgOperand(1)),
8603                              I.getAAMetadata());
8604   assert(MC.getNode() != nullptr &&
8605          "** memcpy should not be lowered as TailCall in mempcpy context **");
8606   DAG.setRoot(MC);
8607 
8608   // Check if Size needs to be truncated or extended.
8609   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
8610 
8611   // Adjust return pointer to point just past the last dst byte.
8612   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
8613                                     Dst, Size);
8614   setValue(&I, DstPlusSize);
8615   return true;
8616 }
8617 
8618 /// See if we can lower a strcpy call into an optimized form.  If so, return
8619 /// true and lower it, otherwise return false and it will be lowered like a
8620 /// normal call.
8621 /// The caller already checked that \p I calls the appropriate LibFunc with a
8622 /// correct prototype.
8623 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
8624   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8625 
8626   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8627   std::pair<SDValue, SDValue> Res =
8628     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
8629                                 getValue(Arg0), getValue(Arg1),
8630                                 MachinePointerInfo(Arg0),
8631                                 MachinePointerInfo(Arg1), isStpcpy);
8632   if (Res.first.getNode()) {
8633     setValue(&I, Res.first);
8634     DAG.setRoot(Res.second);
8635     return true;
8636   }
8637 
8638   return false;
8639 }
8640 
8641 /// See if we can lower a strcmp call into an optimized form.  If so, return
8642 /// true and lower it, otherwise return false and it will be lowered like a
8643 /// normal call.
8644 /// The caller already checked that \p I calls the appropriate LibFunc with a
8645 /// correct prototype.
8646 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
8647   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8648 
8649   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8650   std::pair<SDValue, SDValue> Res =
8651     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
8652                                 getValue(Arg0), getValue(Arg1),
8653                                 MachinePointerInfo(Arg0),
8654                                 MachinePointerInfo(Arg1));
8655   if (Res.first.getNode()) {
8656     processIntegerCallValue(I, Res.first, true);
8657     PendingLoads.push_back(Res.second);
8658     return true;
8659   }
8660 
8661   return false;
8662 }
8663 
8664 /// See if we can lower a strlen call into an optimized form.  If so, return
8665 /// true and lower it, otherwise return false and it will be lowered like a
8666 /// normal call.
8667 /// The caller already checked that \p I calls the appropriate LibFunc with a
8668 /// correct prototype.
8669 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
8670   const Value *Arg0 = I.getArgOperand(0);
8671 
8672   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8673   std::pair<SDValue, SDValue> Res =
8674     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
8675                                 getValue(Arg0), MachinePointerInfo(Arg0));
8676   if (Res.first.getNode()) {
8677     processIntegerCallValue(I, Res.first, false);
8678     PendingLoads.push_back(Res.second);
8679     return true;
8680   }
8681 
8682   return false;
8683 }
8684 
8685 /// See if we can lower a strnlen call into an optimized form.  If so, return
8686 /// true and lower it, otherwise return false and it will be lowered like a
8687 /// normal call.
8688 /// The caller already checked that \p I calls the appropriate LibFunc with a
8689 /// correct prototype.
8690 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
8691   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8692 
8693   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8694   std::pair<SDValue, SDValue> Res =
8695     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
8696                                  getValue(Arg0), getValue(Arg1),
8697                                  MachinePointerInfo(Arg0));
8698   if (Res.first.getNode()) {
8699     processIntegerCallValue(I, Res.first, false);
8700     PendingLoads.push_back(Res.second);
8701     return true;
8702   }
8703 
8704   return false;
8705 }
8706 
8707 /// See if we can lower a unary floating-point operation into an SDNode with
8708 /// the specified Opcode.  If so, return true and lower it, otherwise return
8709 /// false and it will be lowered like a normal call.
8710 /// The caller already checked that \p I calls the appropriate LibFunc with a
8711 /// correct prototype.
8712 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
8713                                               unsigned Opcode) {
8714   // We already checked this call's prototype; verify it doesn't modify errno.
8715   if (!I.onlyReadsMemory())
8716     return false;
8717 
8718   SDNodeFlags Flags;
8719   Flags.copyFMF(cast<FPMathOperator>(I));
8720 
8721   SDValue Tmp = getValue(I.getArgOperand(0));
8722   setValue(&I,
8723            DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
8724   return true;
8725 }
8726 
8727 /// See if we can lower a binary floating-point operation into an SDNode with
8728 /// the specified Opcode. If so, return true and lower it. Otherwise return
8729 /// false, and it will be lowered like a normal call.
8730 /// The caller already checked that \p I calls the appropriate LibFunc with a
8731 /// correct prototype.
8732 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
8733                                                unsigned Opcode) {
8734   // We already checked this call's prototype; verify it doesn't modify errno.
8735   if (!I.onlyReadsMemory())
8736     return false;
8737 
8738   SDNodeFlags Flags;
8739   Flags.copyFMF(cast<FPMathOperator>(I));
8740 
8741   SDValue Tmp0 = getValue(I.getArgOperand(0));
8742   SDValue Tmp1 = getValue(I.getArgOperand(1));
8743   EVT VT = Tmp0.getValueType();
8744   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
8745   return true;
8746 }
8747 
8748 void SelectionDAGBuilder::visitCall(const CallInst &I) {
8749   // Handle inline assembly differently.
8750   if (I.isInlineAsm()) {
8751     visitInlineAsm(I);
8752     return;
8753   }
8754 
8755   diagnoseDontCall(I);
8756 
8757   if (Function *F = I.getCalledFunction()) {
8758     if (F->isDeclaration()) {
8759       // Is this an LLVM intrinsic or a target-specific intrinsic?
8760       unsigned IID = F->getIntrinsicID();
8761       if (!IID)
8762         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
8763           IID = II->getIntrinsicID(F);
8764 
8765       if (IID) {
8766         visitIntrinsicCall(I, IID);
8767         return;
8768       }
8769     }
8770 
8771     // Check for well-known libc/libm calls.  If the function is internal, it
8772     // can't be a library call.  Don't do the check if marked as nobuiltin for
8773     // some reason or the call site requires strict floating point semantics.
8774     LibFunc Func;
8775     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
8776         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
8777         LibInfo->hasOptimizedCodeGen(Func)) {
8778       switch (Func) {
8779       default: break;
8780       case LibFunc_bcmp:
8781         if (visitMemCmpBCmpCall(I))
8782           return;
8783         break;
8784       case LibFunc_copysign:
8785       case LibFunc_copysignf:
8786       case LibFunc_copysignl:
8787         // We already checked this call's prototype; verify it doesn't modify
8788         // errno.
8789         if (I.onlyReadsMemory()) {
8790           SDValue LHS = getValue(I.getArgOperand(0));
8791           SDValue RHS = getValue(I.getArgOperand(1));
8792           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
8793                                    LHS.getValueType(), LHS, RHS));
8794           return;
8795         }
8796         break;
8797       case LibFunc_fabs:
8798       case LibFunc_fabsf:
8799       case LibFunc_fabsl:
8800         if (visitUnaryFloatCall(I, ISD::FABS))
8801           return;
8802         break;
8803       case LibFunc_fmin:
8804       case LibFunc_fminf:
8805       case LibFunc_fminl:
8806         if (visitBinaryFloatCall(I, ISD::FMINNUM))
8807           return;
8808         break;
8809       case LibFunc_fmax:
8810       case LibFunc_fmaxf:
8811       case LibFunc_fmaxl:
8812         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
8813           return;
8814         break;
8815       case LibFunc_sin:
8816       case LibFunc_sinf:
8817       case LibFunc_sinl:
8818         if (visitUnaryFloatCall(I, ISD::FSIN))
8819           return;
8820         break;
8821       case LibFunc_cos:
8822       case LibFunc_cosf:
8823       case LibFunc_cosl:
8824         if (visitUnaryFloatCall(I, ISD::FCOS))
8825           return;
8826         break;
8827       case LibFunc_sqrt:
8828       case LibFunc_sqrtf:
8829       case LibFunc_sqrtl:
8830       case LibFunc_sqrt_finite:
8831       case LibFunc_sqrtf_finite:
8832       case LibFunc_sqrtl_finite:
8833         if (visitUnaryFloatCall(I, ISD::FSQRT))
8834           return;
8835         break;
8836       case LibFunc_floor:
8837       case LibFunc_floorf:
8838       case LibFunc_floorl:
8839         if (visitUnaryFloatCall(I, ISD::FFLOOR))
8840           return;
8841         break;
8842       case LibFunc_nearbyint:
8843       case LibFunc_nearbyintf:
8844       case LibFunc_nearbyintl:
8845         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
8846           return;
8847         break;
8848       case LibFunc_ceil:
8849       case LibFunc_ceilf:
8850       case LibFunc_ceill:
8851         if (visitUnaryFloatCall(I, ISD::FCEIL))
8852           return;
8853         break;
8854       case LibFunc_rint:
8855       case LibFunc_rintf:
8856       case LibFunc_rintl:
8857         if (visitUnaryFloatCall(I, ISD::FRINT))
8858           return;
8859         break;
8860       case LibFunc_round:
8861       case LibFunc_roundf:
8862       case LibFunc_roundl:
8863         if (visitUnaryFloatCall(I, ISD::FROUND))
8864           return;
8865         break;
8866       case LibFunc_trunc:
8867       case LibFunc_truncf:
8868       case LibFunc_truncl:
8869         if (visitUnaryFloatCall(I, ISD::FTRUNC))
8870           return;
8871         break;
8872       case LibFunc_log2:
8873       case LibFunc_log2f:
8874       case LibFunc_log2l:
8875         if (visitUnaryFloatCall(I, ISD::FLOG2))
8876           return;
8877         break;
8878       case LibFunc_exp2:
8879       case LibFunc_exp2f:
8880       case LibFunc_exp2l:
8881         if (visitUnaryFloatCall(I, ISD::FEXP2))
8882           return;
8883         break;
8884       case LibFunc_exp10:
8885       case LibFunc_exp10f:
8886       case LibFunc_exp10l:
8887         if (visitUnaryFloatCall(I, ISD::FEXP10))
8888           return;
8889         break;
8890       case LibFunc_ldexp:
8891       case LibFunc_ldexpf:
8892       case LibFunc_ldexpl:
8893         if (visitBinaryFloatCall(I, ISD::FLDEXP))
8894           return;
8895         break;
8896       case LibFunc_memcmp:
8897         if (visitMemCmpBCmpCall(I))
8898           return;
8899         break;
8900       case LibFunc_mempcpy:
8901         if (visitMemPCpyCall(I))
8902           return;
8903         break;
8904       case LibFunc_memchr:
8905         if (visitMemChrCall(I))
8906           return;
8907         break;
8908       case LibFunc_strcpy:
8909         if (visitStrCpyCall(I, false))
8910           return;
8911         break;
8912       case LibFunc_stpcpy:
8913         if (visitStrCpyCall(I, true))
8914           return;
8915         break;
8916       case LibFunc_strcmp:
8917         if (visitStrCmpCall(I))
8918           return;
8919         break;
8920       case LibFunc_strlen:
8921         if (visitStrLenCall(I))
8922           return;
8923         break;
8924       case LibFunc_strnlen:
8925         if (visitStrNLenCall(I))
8926           return;
8927         break;
8928       }
8929     }
8930   }
8931 
8932   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
8933   // have to do anything here to lower funclet bundles.
8934   // CFGuardTarget bundles are lowered in LowerCallTo.
8935   assert(!I.hasOperandBundlesOtherThan(
8936              {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
8937               LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
8938               LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi}) &&
8939          "Cannot lower calls with arbitrary operand bundles!");
8940 
8941   SDValue Callee = getValue(I.getCalledOperand());
8942 
8943   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
8944     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
8945   else
8946     // Check if we can potentially perform a tail call. More detailed checking
8947     // is done within LowerCallTo, after more information about the call is
8948     // known.
8949     LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
8950 }
8951 
8952 namespace {
8953 
8954 /// AsmOperandInfo - This contains information for each constraint that we are
8955 /// lowering.
8956 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
8957 public:
8958   /// CallOperand - If this is the result output operand or a clobber
8959   /// this is null, otherwise it is the incoming operand to the CallInst.
8960   /// This gets modified as the asm is processed.
8961   SDValue CallOperand;
8962 
8963   /// AssignedRegs - If this is a register or register class operand, this
8964   /// contains the set of registers corresponding to the operand.
8965   RegsForValue AssignedRegs;
8966 
8967   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
8968     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
8969   }
8970 
8971   /// Whether or not this operand accesses memory
8972   bool hasMemory(const TargetLowering &TLI) const {
8973     // Indirect operand accesses access memory.
8974     if (isIndirect)
8975       return true;
8976 
8977     for (const auto &Code : Codes)
8978       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
8979         return true;
8980 
8981     return false;
8982   }
8983 };
8984 
8985 
8986 } // end anonymous namespace
8987 
8988 /// Make sure that the output operand \p OpInfo and its corresponding input
8989 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
8990 /// out).
8991 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
8992                                SDISelAsmOperandInfo &MatchingOpInfo,
8993                                SelectionDAG &DAG) {
8994   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
8995     return;
8996 
8997   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
8998   const auto &TLI = DAG.getTargetLoweringInfo();
8999 
9000   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9001       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
9002                                        OpInfo.ConstraintVT);
9003   std::pair<unsigned, const TargetRegisterClass *> InputRC =
9004       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
9005                                        MatchingOpInfo.ConstraintVT);
9006   if ((OpInfo.ConstraintVT.isInteger() !=
9007        MatchingOpInfo.ConstraintVT.isInteger()) ||
9008       (MatchRC.second != InputRC.second)) {
9009     // FIXME: error out in a more elegant fashion
9010     report_fatal_error("Unsupported asm: input constraint"
9011                        " with a matching output constraint of"
9012                        " incompatible type!");
9013   }
9014   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9015 }
9016 
9017 /// Get a direct memory input to behave well as an indirect operand.
9018 /// This may introduce stores, hence the need for a \p Chain.
9019 /// \return The (possibly updated) chain.
9020 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
9021                                         SDISelAsmOperandInfo &OpInfo,
9022                                         SelectionDAG &DAG) {
9023   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9024 
9025   // If we don't have an indirect input, put it in the constpool if we can,
9026   // otherwise spill it to a stack slot.
9027   // TODO: This isn't quite right. We need to handle these according to
9028   // the addressing mode that the constraint wants. Also, this may take
9029   // an additional register for the computation and we don't want that
9030   // either.
9031 
9032   // If the operand is a float, integer, or vector constant, spill to a
9033   // constant pool entry to get its address.
9034   const Value *OpVal = OpInfo.CallOperandVal;
9035   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9036       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9037     OpInfo.CallOperand = DAG.getConstantPool(
9038         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
9039     return Chain;
9040   }
9041 
9042   // Otherwise, create a stack slot and emit a store to it before the asm.
9043   Type *Ty = OpVal->getType();
9044   auto &DL = DAG.getDataLayout();
9045   uint64_t TySize = DL.getTypeAllocSize(Ty);
9046   MachineFunction &MF = DAG.getMachineFunction();
9047   int SSFI = MF.getFrameInfo().CreateStackObject(
9048       TySize, DL.getPrefTypeAlign(Ty), false);
9049   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
9050   Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9051                             MachinePointerInfo::getFixedStack(MF, SSFI),
9052                             TLI.getMemValueType(DL, Ty));
9053   OpInfo.CallOperand = StackSlot;
9054 
9055   return Chain;
9056 }
9057 
9058 /// GetRegistersForValue - Assign registers (virtual or physical) for the
9059 /// specified operand.  We prefer to assign virtual registers, to allow the
9060 /// register allocator to handle the assignment process.  However, if the asm
9061 /// uses features that we can't model on machineinstrs, we have SDISel do the
9062 /// allocation.  This produces generally horrible, but correct, code.
9063 ///
9064 ///   OpInfo describes the operand
9065 ///   RefOpInfo describes the matching operand if any, the operand otherwise
9066 static std::optional<unsigned>
9067 getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
9068                      SDISelAsmOperandInfo &OpInfo,
9069                      SDISelAsmOperandInfo &RefOpInfo) {
9070   LLVMContext &Context = *DAG.getContext();
9071   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9072 
9073   MachineFunction &MF = DAG.getMachineFunction();
9074   SmallVector<unsigned, 4> Regs;
9075   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9076 
9077   // No work to do for memory/address operands.
9078   if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9079       OpInfo.ConstraintType == TargetLowering::C_Address)
9080     return std::nullopt;
9081 
9082   // If this is a constraint for a single physreg, or a constraint for a
9083   // register class, find it.
9084   unsigned AssignedReg;
9085   const TargetRegisterClass *RC;
9086   std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
9087       &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9088   // RC is unset only on failure. Return immediately.
9089   if (!RC)
9090     return std::nullopt;
9091 
9092   // Get the actual register value type.  This is important, because the user
9093   // may have asked for (e.g.) the AX register in i32 type.  We need to
9094   // remember that AX is actually i16 to get the right extension.
9095   const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
9096 
9097   if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9098     // If this is an FP operand in an integer register (or vice versa), or more
9099     // generally if the operand value disagrees with the register class we plan
9100     // to stick it in, fix the operand type.
9101     //
9102     // If this is an input value, the bitcast to the new type is done now.
9103     // Bitcast for output value is done at the end of visitInlineAsm().
9104     if ((OpInfo.Type == InlineAsm::isOutput ||
9105          OpInfo.Type == InlineAsm::isInput) &&
9106         !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9107       // Try to convert to the first EVT that the reg class contains.  If the
9108       // types are identical size, use a bitcast to convert (e.g. two differing
9109       // vector types).  Note: output bitcast is done at the end of
9110       // visitInlineAsm().
9111       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9112         // Exclude indirect inputs while they are unsupported because the code
9113         // to perform the load is missing and thus OpInfo.CallOperand still
9114         // refers to the input address rather than the pointed-to value.
9115         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
9116           OpInfo.CallOperand =
9117               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
9118         OpInfo.ConstraintVT = RegVT;
9119         // If the operand is an FP value and we want it in integer registers,
9120         // use the corresponding integer type. This turns an f64 value into
9121         // i64, which can be passed with two i32 values on a 32-bit machine.
9122       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9123         MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
9124         if (OpInfo.Type == InlineAsm::isInput)
9125           OpInfo.CallOperand =
9126               DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
9127         OpInfo.ConstraintVT = VT;
9128       }
9129     }
9130   }
9131 
9132   // No need to allocate a matching input constraint since the constraint it's
9133   // matching to has already been allocated.
9134   if (OpInfo.isMatchingInputConstraint())
9135     return std::nullopt;
9136 
9137   EVT ValueVT = OpInfo.ConstraintVT;
9138   if (OpInfo.ConstraintVT == MVT::Other)
9139     ValueVT = RegVT;
9140 
9141   // Initialize NumRegs.
9142   unsigned NumRegs = 1;
9143   if (OpInfo.ConstraintVT != MVT::Other)
9144     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
9145 
9146   // If this is a constraint for a specific physical register, like {r17},
9147   // assign it now.
9148 
9149   // If this is associated with a specific register, initialize the iterator to
9150   // the correct place. If virtual, make sure we have enough registers.
9151 
9152   // Initialize iterator if necessary
9153   TargetRegisterClass::iterator I = RC->begin();
9154   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9155 
9156   // Do not check for single registers.
9157   if (AssignedReg) {
9158     I = std::find(I, RC->end(), AssignedReg);
9159     if (I == RC->end()) {
9160       // RC does not contain the selected register, which indicates a
9161       // mismatch between the register and the required type/bitwidth.
9162       return {AssignedReg};
9163     }
9164   }
9165 
9166   for (; NumRegs; --NumRegs, ++I) {
9167     assert(I != RC->end() && "Ran out of registers to allocate!");
9168     Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
9169     Regs.push_back(R);
9170   }
9171 
9172   OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
9173   return std::nullopt;
9174 }
9175 
9176 static unsigned
9177 findMatchingInlineAsmOperand(unsigned OperandNo,
9178                              const std::vector<SDValue> &AsmNodeOperands) {
9179   // Scan until we find the definition we already emitted of this operand.
9180   unsigned CurOp = InlineAsm::Op_FirstOperand;
9181   for (; OperandNo; --OperandNo) {
9182     // Advance to the next operand.
9183     unsigned OpFlag =
9184         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
9185     const InlineAsm::Flag F(OpFlag);
9186     assert(
9187         (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
9188         "Skipped past definitions?");
9189     CurOp += F.getNumOperandRegisters() + 1;
9190   }
9191   return CurOp;
9192 }
9193 
9194 namespace {
9195 
9196 class ExtraFlags {
9197   unsigned Flags = 0;
9198 
9199 public:
9200   explicit ExtraFlags(const CallBase &Call) {
9201     const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9202     if (IA->hasSideEffects())
9203       Flags |= InlineAsm::Extra_HasSideEffects;
9204     if (IA->isAlignStack())
9205       Flags |= InlineAsm::Extra_IsAlignStack;
9206     if (Call.isConvergent())
9207       Flags |= InlineAsm::Extra_IsConvergent;
9208     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
9209   }
9210 
9211   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
9212     // Ideally, we would only check against memory constraints.  However, the
9213     // meaning of an Other constraint can be target-specific and we can't easily
9214     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
9215     // for Other constraints as well.
9216     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9217         OpInfo.ConstraintType == TargetLowering::C_Other) {
9218       if (OpInfo.Type == InlineAsm::isInput)
9219         Flags |= InlineAsm::Extra_MayLoad;
9220       else if (OpInfo.Type == InlineAsm::isOutput)
9221         Flags |= InlineAsm::Extra_MayStore;
9222       else if (OpInfo.Type == InlineAsm::isClobber)
9223         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
9224     }
9225   }
9226 
9227   unsigned get() const { return Flags; }
9228 };
9229 
9230 } // end anonymous namespace
9231 
9232 static bool isFunction(SDValue Op) {
9233   if (Op && Op.getOpcode() == ISD::GlobalAddress) {
9234     if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9235       auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9236 
9237       // A normal "call dllimport func" instruction (not inline asm) forces
9238       // indirect access via its call opcode, and the asm printer usually prints
9239       // an indirect-symbol marker (e.g. "*") based on that opcode. Inline asm
9240       // cannot do that today (in effect this is similar to a "Data Access"
9241       // action), so we ignore dllimport functions here.
9242       if (Fn && !Fn->hasDLLImportStorageClass())
9243         return true;
9244     }
9245   }
9246   return false;
9247 }
9248 
9249 /// visitInlineAsm - Handle a call to an InlineAsm object.
9250 void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
9251                                          const BasicBlock *EHPadBB) {
9252   const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9253 
9254   /// ConstraintOperands - Information about all of the constraints.
9255   SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
9256 
9257   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9258   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
9259       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
9260 
9261   // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
9262   // AsmDialect, MayLoad, MayStore).
9263   bool HasSideEffect = IA->hasSideEffects();
9264   ExtraFlags ExtraInfo(Call);
9265 
9266   for (auto &T : TargetConstraints) {
9267     ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
9268     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
9269 
9270     if (OpInfo.CallOperandVal)
9271       OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
9272 
9273     if (!HasSideEffect)
9274       HasSideEffect = OpInfo.hasMemory(TLI);
9275 
9276     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
9277     // FIXME: Could we compute this on OpInfo rather than T?
9278 
9279     // Compute the constraint code and ConstraintType to use.
9280     TLI.ComputeConstraintToUse(T, SDValue());
9281 
9282     if (T.ConstraintType == TargetLowering::C_Immediate &&
9283         OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
9284       // We've delayed emitting a diagnostic for constraints like "n" because
9285       // inlining could cause an integer constant to show up.
9286       return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
9287                                           "' expects an integer constant "
9288                                           "expression");
9289 
9290     ExtraInfo.update(T);
9291   }
9292 
9293   // We won't need to flush pending loads if this asm doesn't touch
9294   // memory and is nonvolatile.
9295   SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
9296 
9297   bool EmitEHLabels = isa<InvokeInst>(Call);
9298   if (EmitEHLabels) {
9299     assert(EHPadBB && "InvokeInst must have an EHPadBB");
9300   }
9301   bool IsCallBr = isa<CallBrInst>(Call);
9302 
9303   if (IsCallBr || EmitEHLabels) {
9304     // If this is a callbr or invoke we need to flush pending exports since
9305     // inlineasm_br and invoke are terminators.
9306     // We need to do this before nodes are glued to the inlineasm_br node.
9307     Chain = getControlRoot();
9308   }
9309 
9310   MCSymbol *BeginLabel = nullptr;
9311   if (EmitEHLabels) {
9312     Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
9313   }
9314 
9315   int OpNo = -1;
9316   SmallVector<StringRef> AsmStrs;
9317   IA->collectAsmStrs(AsmStrs);
9318 
9319   // Second pass over the constraints: compute which constraint option to use.
9320   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9321     if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
9322       OpNo++;
9323 
9324     // If this is an output operand with a matching input operand, look up the
9325     // matching input. If their types mismatch, e.g. one is an integer, the
9326     // other is floating point, or their sizes are different, flag it as an
9327     // error.
9328     if (OpInfo.hasMatchingInput()) {
9329       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
9330       patchMatchingInput(OpInfo, Input, DAG);
9331     }
9332 
9333     // Compute the constraint code and ConstraintType to use.
9334     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
9335 
9336     if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
9337          OpInfo.Type == InlineAsm::isClobber) ||
9338         OpInfo.ConstraintType == TargetLowering::C_Address)
9339       continue;
9340 
9341     // In the Linux PIC model, there are 4 cases of value/label addressing:
9342     //
9343     // 1: Function call or label jmp inside the module.
9344     // 2: Data access (such as a global or static variable) inside the module.
9345     // 3: Function call or label jmp outside the module.
9346     // 4: Data access (such as a global variable) outside the module.
9347     //
9348     // Because the current LLVM inline asm design deliberately does not
9349     // "recognize" the asm text, it is hard to treat memory addressing
9350     // differently for the same value/address used in different instructions.
9351     // For example, in the PIC model a function call may go through the PLT or be
9352     // directly PC-relative, while a lea/mov of a function address may use the GOT.
9353     //
9354     // Here we try to "recognize" function calls for cases 1 and 3 in inline asm
9355     // and adjust the constraint for them.
9356     //
9357     // TODO: Current inline asm does not encourage jumping to an outside label,
9358     // so we don't handle jmp-to-function-label yet, but this should be enhanced
9359     // (especially in the PIC model) if a meaningful need arises.
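    //
    // For illustration only (the IR below is a hypothetical sketch, and the
    // exact constraint string is an assumption):
    //   call void asm sideeffect "call ${0:P}", "*m"(ptr elementtype(void ()) @foo)
    // Such a function operand arrives here as an indirect memory operand; the
    // check below turns it into a direct (C_Address) branch target so it is
    // emitted as a direct/PLT call instead of being addressed through the GOT.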
9360     if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
9361         TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
9362         TM.getCodeModel() != CodeModel::Large) {
9363       OpInfo.isIndirect = false;
9364       OpInfo.ConstraintType = TargetLowering::C_Address;
9365     }
9366 
9367     // If this is a memory input, and if the operand is not indirect, do what we
9368     // need to provide an address for the memory input.
9369     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
9370         !OpInfo.isIndirect) {
9371       assert((OpInfo.isMultipleAlternative ||
9372               (OpInfo.Type == InlineAsm::isInput)) &&
9373              "Can only indirectify direct input operands!");
9374 
9375       // Memory operands really want the address of the value.
9376       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
9377 
9378       // There is no longer a Value* corresponding to this operand.
9379       OpInfo.CallOperandVal = nullptr;
9380 
9381       // It is now an indirect operand.
9382       OpInfo.isIndirect = true;
9383     }
9384 
9385   }
9386 
9387   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
9388   std::vector<SDValue> AsmNodeOperands;
9389   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
9390   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
9391       IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
9392 
9393   // If we have a !srcloc metadata node associated with it, we want to attach
9394   // this to the ultimately generated inline asm machineinstr.  To do this, we
9395   // pass in the third operand as this (potentially null) inline asm MDNode.
9396   const MDNode *SrcLoc = Call.getMetadata("srcloc");
9397   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
9398 
9399   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
9400   // bits as operand 3.
9401   AsmNodeOperands.push_back(DAG.getTargetConstant(
9402       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9403 
9404   // Third pass: Loop over operands to prepare DAG-level operands. As part of
9405   // this, assign virtual and physical registers for inputs and outputs.
9406   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9407     // Assign Registers.
9408     SDISelAsmOperandInfo &RefOpInfo =
9409         OpInfo.isMatchingInputConstraint()
9410             ? ConstraintOperands[OpInfo.getMatchedOperand()]
9411             : OpInfo;
9412     const auto RegError =
9413         getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
9414     if (RegError) {
9415       const MachineFunction &MF = DAG.getMachineFunction();
9416       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9417       const char *RegName = TRI.getName(*RegError);
9418       emitInlineAsmError(Call, "register '" + Twine(RegName) +
9419                                    "' allocated for constraint '" +
9420                                    Twine(OpInfo.ConstraintCode) +
9421                                    "' does not match required type");
9422       return;
9423     }
9424 
9425     auto DetectWriteToReservedRegister = [&]() {
9426       const MachineFunction &MF = DAG.getMachineFunction();
9427       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9428       for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
9429         if (Register::isPhysicalRegister(Reg) &&
9430             TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
9431           const char *RegName = TRI.getName(Reg);
9432           emitInlineAsmError(Call, "write to reserved register '" +
9433                                        Twine(RegName) + "'");
9434           return true;
9435         }
9436       }
9437       return false;
9438     };
9439     assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
9440             (OpInfo.Type == InlineAsm::isInput &&
9441              !OpInfo.isMatchingInputConstraint())) &&
9442            "Only address as input operand is allowed.");
9443 
9444     switch (OpInfo.Type) {
9445     case InlineAsm::isOutput:
9446       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9447         const InlineAsm::ConstraintCode ConstraintID =
9448             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9449         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9450                "Failed to convert memory constraint code to constraint id.");
9451 
9452         // Add information to the INLINEASM node to know about this output.
9453         InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
9454         OpFlags.setMemConstraint(ConstraintID);
9455         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
9456                                                         MVT::i32));
9457         AsmNodeOperands.push_back(OpInfo.CallOperand);
9458       } else {
9459         // Otherwise, this outputs to a register (directly for C_Register /
9460         // C_RegisterClass, and a target-defined fashion for
9461         // C_Immediate/C_Other). Find a register that we can use.
9462         if (OpInfo.AssignedRegs.Regs.empty()) {
9463           emitInlineAsmError(
9464               Call, "couldn't allocate output register for constraint '" +
9465                         Twine(OpInfo.ConstraintCode) + "'");
9466           return;
9467         }
9468 
9469         if (DetectWriteToReservedRegister())
9470           return;
9471 
9472         // Add information to the INLINEASM node to know that this register is
9473         // set.
9474         OpInfo.AssignedRegs.AddInlineAsmOperands(
9475             OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
9476                                   : InlineAsm::Kind::RegDef,
9477             false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
9478       }
9479       break;
9480 
9481     case InlineAsm::isInput:
9482     case InlineAsm::isLabel: {
9483       SDValue InOperandVal = OpInfo.CallOperand;
9484 
9485       if (OpInfo.isMatchingInputConstraint()) {
9486         // If this is required to match an output register we have already set,
9487         // just use its register.
9488         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
9489                                                   AsmNodeOperands);
9490         InlineAsm::Flag Flag(
9491             cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue());
9492         if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
9493           if (OpInfo.isIndirect) {
9494             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
9495             emitInlineAsmError(Call, "inline asm not supported yet: "
9496                                      "don't know how to handle tied "
9497                                      "indirect register inputs");
9498             return;
9499           }
9500 
9501           SmallVector<unsigned, 4> Regs;
9502           MachineFunction &MF = DAG.getMachineFunction();
9503           MachineRegisterInfo &MRI = MF.getRegInfo();
9504           const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9505           auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
9506           Register TiedReg = R->getReg();
9507           MVT RegVT = R->getSimpleValueType(0);
9508           const TargetRegisterClass *RC =
9509               TiedReg.isVirtual()     ? MRI.getRegClass(TiedReg)
9510               : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
9511                                       : TRI.getMinimalPhysRegClass(TiedReg);
9512           for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i)
9513             Regs.push_back(MRI.createVirtualRegister(RC));
9514 
9515           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
9516 
9517           SDLoc dl = getCurSDLoc();
9518           // Use MatchedRegs to copy the input value into the created vregs.
9519           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, &Call);
9520           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, true,
9521                                            OpInfo.getMatchedOperand(), dl, DAG,
9522                                            AsmNodeOperands);
9523           break;
9524         }
9525 
9526         assert(Flag.isMemKind() && "Unknown matching constraint!");
9527         assert(Flag.getNumOperandRegisters() == 1 &&
9528                "Unexpected number of operands");
9529         // Add information to the INLINEASM node to know about this input.
9530         // See InlineAsm.h isUseOperandTiedToDef.
9531         Flag.clearMemConstraint();
9532         Flag.setMatchingOp(OpInfo.getMatchedOperand());
9533         AsmNodeOperands.push_back(DAG.getTargetConstant(
9534             Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9535         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
9536         break;
9537       }
9538 
9539       // Treat indirect 'X' constraint as memory.
9540       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
9541           OpInfo.isIndirect)
9542         OpInfo.ConstraintType = TargetLowering::C_Memory;
9543 
9544       if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
9545           OpInfo.ConstraintType == TargetLowering::C_Other) {
9546         std::vector<SDValue> Ops;
9547         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
9548                                           Ops, DAG);
9549         if (Ops.empty()) {
9550           if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
9551             if (isa<ConstantSDNode>(InOperandVal)) {
9552               emitInlineAsmError(Call, "value out of range for constraint '" +
9553                                            Twine(OpInfo.ConstraintCode) + "'");
9554               return;
9555             }
9556 
9557           emitInlineAsmError(Call,
9558                              "invalid operand for inline asm constraint '" +
9559                                  Twine(OpInfo.ConstraintCode) + "'");
9560           return;
9561         }
9562 
9563         // Add information to the INLINEASM node to know about this input.
9564         InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size());
9565         AsmNodeOperands.push_back(DAG.getTargetConstant(
9566             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9567         llvm::append_range(AsmNodeOperands, Ops);
9568         break;
9569       }
9570 
9571       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9572         assert((OpInfo.isIndirect ||
9573                 OpInfo.ConstraintType != TargetLowering::C_Memory) &&
9574                "Operand must be indirect to be a mem!");
9575         assert(InOperandVal.getValueType() ==
9576                    TLI.getPointerTy(DAG.getDataLayout()) &&
9577                "Memory operands expect pointer values");
9578 
9579         const InlineAsm::ConstraintCode ConstraintID =
9580             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9581         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9582                "Failed to convert memory constraint code to constraint id.");
9583 
9584         // Add information to the INLINEASM node to know about this input.
9585         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9586         ResOpType.setMemConstraint(ConstraintID);
9587         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
9588                                                         getCurSDLoc(),
9589                                                         MVT::i32));
9590         AsmNodeOperands.push_back(InOperandVal);
9591         break;
9592       }
9593 
9594       if (OpInfo.ConstraintType == TargetLowering::C_Address) {
9595         const InlineAsm::ConstraintCode ConstraintID =
9596             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9597         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9598                "Failed to convert memory constraint code to constraint id.");
9599 
9600         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9601 
9602         SDValue AsmOp = InOperandVal;
9603         if (isFunction(InOperandVal)) {
9604           auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
9605           ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1);
9606           AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
9607                                              InOperandVal.getValueType(),
9608                                              GA->getOffset());
9609         }
9610 
9611         // Add information to the INLINEASM node to know about this input.
9612         ResOpType.setMemConstraint(ConstraintID);
9613 
9614         AsmNodeOperands.push_back(
9615             DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32));
9616 
9617         AsmNodeOperands.push_back(AsmOp);
9618         break;
9619       }
9620 
9621       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
9622               OpInfo.ConstraintType == TargetLowering::C_Register) &&
9623              "Unknown constraint type!");
9624 
9625       // TODO: Support this.
9626       if (OpInfo.isIndirect) {
9627         emitInlineAsmError(
9628             Call, "Don't know how to handle indirect register inputs yet "
9629                   "for constraint '" +
9630                       Twine(OpInfo.ConstraintCode) + "'");
9631         return;
9632       }
9633 
9634       // Copy the input into the appropriate registers.
9635       if (OpInfo.AssignedRegs.Regs.empty()) {
9636         emitInlineAsmError(Call,
9637                            "couldn't allocate input reg for constraint '" +
9638                                Twine(OpInfo.ConstraintCode) + "'");
9639         return;
9640       }
9641 
9642       if (DetectWriteToReservedRegister())
9643         return;
9644 
9645       SDLoc dl = getCurSDLoc();
9646 
9647       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue,
9648                                         &Call);
9649 
9650       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, false,
9651                                                0, dl, DAG, AsmNodeOperands);
9652       break;
9653     }
9654     case InlineAsm::isClobber:
9655       // Add the clobbered value to the operand list, so that the register
9656       // allocator is aware that the physreg got clobbered.
9657       if (!OpInfo.AssignedRegs.Regs.empty())
9658         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::Clobber,
9659                                                  false, 0, getCurSDLoc(), DAG,
9660                                                  AsmNodeOperands);
9661       break;
9662     }
9663   }
9664 
9665   // Finish up input operands.  Set the input chain and add the flag last.
9666   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
9667   if (Glue.getNode()) AsmNodeOperands.push_back(Glue);
9668 
9669   unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
9670   Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
9671                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
9672   Glue = Chain.getValue(1);
9673 
9674   // Do additional work to generate outputs.
9675 
9676   SmallVector<EVT, 1> ResultVTs;
9677   SmallVector<SDValue, 1> ResultValues;
9678   SmallVector<SDValue, 8> OutChains;
9679 
9680   llvm::Type *CallResultType = Call.getType();
9681   ArrayRef<Type *> ResultTypes;
9682   if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
9683     ResultTypes = StructResult->elements();
9684   else if (!CallResultType->isVoidTy())
9685     ResultTypes = ArrayRef(CallResultType);
9686 
9687   auto CurResultType = ResultTypes.begin();
9688   auto handleRegAssign = [&](SDValue V) {
9689     assert(CurResultType != ResultTypes.end() && "Unexpected value");
9690     assert((*CurResultType)->isSized() && "Unexpected unsized type");
9691     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
9692     ++CurResultType;
9693     // If the type of the inline asm call-site return value differs from, but
9694     // has the same size as, the type of the asm output, bitcast it.  One example
9695     // is for vectors with different width / number of elements.  This can
9696     // happen for register classes that can contain multiple different value
9697     // types.  The preg or vreg allocated may not have the same VT as was
9698     // expected.
9699     //
9700     // This can also happen for a return value that disagrees with the register
9701     // class it is put in, e.g. a double in a general-purpose register on a
9702     // 32-bit machine.
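    // For instance (illustrative): a register class may hand back a v2i64 where
    // the call site expects v4i32 (same size, so bitcast below), while a result
    // tied to a wider input may come back as i64 where i32 was expected
    // (both integer, so truncate).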
9703     if (ResultVT != V.getValueType() &&
9704         ResultVT.getSizeInBits() == V.getValueSizeInBits())
9705       V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
9706     else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
9707              V.getValueType().isInteger()) {
9708       // If a result value was tied to an input value, the computed result
9709       // may have a wider width than the expected result.  Extract the
9710       // relevant portion.
9711       V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
9712     }
9713     assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
9714     ResultVTs.push_back(ResultVT);
9715     ResultValues.push_back(V);
9716   };
9717 
9718   // Deal with output operands.
9719   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9720     if (OpInfo.Type == InlineAsm::isOutput) {
9721       SDValue Val;
9722       // Skip trivial output operands.
9723       if (OpInfo.AssignedRegs.Regs.empty())
9724         continue;
9725 
9726       switch (OpInfo.ConstraintType) {
9727       case TargetLowering::C_Register:
9728       case TargetLowering::C_RegisterClass:
9729         Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
9730                                                   Chain, &Glue, &Call);
9731         break;
9732       case TargetLowering::C_Immediate:
9733       case TargetLowering::C_Other:
9734         Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
9735                                               OpInfo, DAG);
9736         break;
9737       case TargetLowering::C_Memory:
9738         break; // Already handled.
9739       case TargetLowering::C_Address:
9740         break; // Silence warning.
9741       case TargetLowering::C_Unknown:
9742         assert(false && "Unexpected unknown constraint");
9743       }
9744 
9745       // Indirect outputs manifest as stores. Record output chains.
9746       if (OpInfo.isIndirect) {
9747         const Value *Ptr = OpInfo.CallOperandVal;
9748         assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
9749         SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
9750                                      MachinePointerInfo(Ptr));
9751         OutChains.push_back(Store);
9752       } else {
9753         // Generate CopyFromRegs to the associated registers.
9754         assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
9755         if (Val.getOpcode() == ISD::MERGE_VALUES) {
9756           for (const SDValue &V : Val->op_values())
9757             handleRegAssign(V);
9758         } else
9759           handleRegAssign(Val);
9760       }
9761     }
9762   }
9763 
9764   // Set results.
9765   if (!ResultValues.empty()) {
9766     assert(CurResultType == ResultTypes.end() &&
9767            "Mismatch in number of ResultTypes");
9768     assert(ResultValues.size() == ResultTypes.size() &&
9769            "Mismatch in number of output operands in asm result");
9770 
9771     SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
9772                             DAG.getVTList(ResultVTs), ResultValues);
9773     setValue(&Call, V);
9774   }
9775 
9776   // Collect store chains.
9777   if (!OutChains.empty())
9778     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
9779 
9780   if (EmitEHLabels) {
9781     Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
9782   }
9783 
9784   // Only update the root if the inline assembly has a memory effect.
9785   if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
9786       EmitEHLabels)
9787     DAG.setRoot(Chain);
9788 }
9789 
9790 void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
9791                                              const Twine &Message) {
9792   LLVMContext &Ctx = *DAG.getContext();
9793   Ctx.emitError(&Call, Message);
9794 
9795   // Make sure we leave the DAG in a valid state
9796   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9797   SmallVector<EVT, 1> ValueVTs;
9798   ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
9799 
9800   if (ValueVTs.empty())
9801     return;
9802 
9803   SmallVector<SDValue, 1> Ops;
9804   for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
9805     Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
9806 
9807   setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
9808 }
9809 
9810 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
9811   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
9812                           MVT::Other, getRoot(),
9813                           getValue(I.getArgOperand(0)),
9814                           DAG.getSrcValue(I.getArgOperand(0))));
9815 }
9816 
9817 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
9818   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9819   const DataLayout &DL = DAG.getDataLayout();
9820   SDValue V = DAG.getVAArg(
9821       TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
9822       getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
9823       DL.getABITypeAlign(I.getType()).value());
9824   DAG.setRoot(V.getValue(1));
9825 
9826   if (I.getType()->isPointerTy())
9827     V = DAG.getPtrExtOrTrunc(
9828         V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
9829   setValue(&I, V);
9830 }
9831 
9832 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
9833   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
9834                           MVT::Other, getRoot(),
9835                           getValue(I.getArgOperand(0)),
9836                           DAG.getSrcValue(I.getArgOperand(0))));
9837 }
9838 
9839 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
9840   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
9841                           MVT::Other, getRoot(),
9842                           getValue(I.getArgOperand(0)),
9843                           getValue(I.getArgOperand(1)),
9844                           DAG.getSrcValue(I.getArgOperand(0)),
9845                           DAG.getSrcValue(I.getArgOperand(1))));
9846 }
9847 
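/// Illustrative note for the function below (values are hypothetical): if
/// \p Op carries !range !{i64 0, i64 256} metadata, the unsigned range is
/// [0, 256); its maximum, 255, needs 8 active bits, so Op is wrapped in an
/// AssertZext whose asserted value type is i8, letting later combines assume
/// the upper bits of the result are already zero.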
9848 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
9849                                                     const Instruction &I,
9850                                                     SDValue Op) {
9851   const MDNode *Range = getRangeMetadata(I);
9852   if (!Range)
9853     return Op;
9854 
9855   ConstantRange CR = getConstantRangeFromMetadata(*Range);
9856   if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
9857     return Op;
9858 
9859   APInt Lo = CR.getUnsignedMin();
9860   if (!Lo.isMinValue())
9861     return Op;
9862 
9863   APInt Hi = CR.getUnsignedMax();
9864   unsigned Bits = std::max(Hi.getActiveBits(),
9865                            static_cast<unsigned>(IntegerType::MIN_INT_BITS));
9866 
9867   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
9868 
9869   SDLoc SL = getCurSDLoc();
9870 
9871   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
9872                              DAG.getValueType(SmallVT));
9873   unsigned NumVals = Op.getNode()->getNumValues();
9874   if (NumVals == 1)
9875     return ZExt;
9876 
9877   SmallVector<SDValue, 4> Ops;
9878 
9879   Ops.push_back(ZExt);
9880   for (unsigned I = 1; I != NumVals; ++I)
9881     Ops.push_back(Op.getValue(I));
9882 
9883   return DAG.getMergeValues(Ops, SL);
9884 }
9885 
9886 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
9887 /// the call being lowered.
9888 ///
9889 /// This is a helper for lowering intrinsics that follow a target calling
9890 /// convention or require stack pointer adjustment. Only a subset of the
9891 /// intrinsic's operands need to participate in the calling convention.
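///
/// For example, visitPatchpoint below builds a CLI that (in the non-AnyReg
/// case) covers only the <numArgs> actual call arguments of a patchpoint,
/// starting after the meta operands (<id>, <numBytes>, <target>, <numArgs>).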
9892 void SelectionDAGBuilder::populateCallLoweringInfo(
9893     TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
9894     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
9895     AttributeSet RetAttrs, bool IsPatchPoint) {
9896   TargetLowering::ArgListTy Args;
9897   Args.reserve(NumArgs);
9898 
9899   // Populate the argument list.
9900   // Attributes for args start at offset 1, after the return attribute.
9901   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
9902        ArgI != ArgE; ++ArgI) {
9903     const Value *V = Call->getOperand(ArgI);
9904 
9905     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
9906 
9907     TargetLowering::ArgListEntry Entry;
9908     Entry.Node = getValue(V);
9909     Entry.Ty = V->getType();
9910     Entry.setAttributes(Call, ArgI);
9911     Args.push_back(Entry);
9912   }
9913 
9914   CLI.setDebugLoc(getCurSDLoc())
9915       .setChain(getRoot())
9916       .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
9917                  RetAttrs)
9918       .setDiscardResult(Call->use_empty())
9919       .setIsPatchPoint(IsPatchPoint)
9920       .setIsPreallocated(
9921           Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
9922 }
9923 
9924 /// Add a stack map intrinsic call's live variable operands to a stackmap
9925 /// or patchpoint target node's operand list.
9926 ///
9927 /// Constants are converted to TargetConstants purely as an optimization to
9928 /// avoid constant materialization and register allocation.
9929 ///
9930 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
9931 /// generate address computation nodes, and so FinalizeISel can convert the
9932 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
9933 /// address materialization and register allocation, but may also be required
9934 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
9935 /// alloca in the entry block, then the runtime may assume that the alloca's
9936 /// StackMap location can be read immediately after compilation and that the
9937 /// location is valid at any point during execution (this is similar to the
9938 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
9939 /// only available in a register, then the runtime would need to trap when
9940 /// execution reaches the StackMap in order to read the alloca's location.
9941 static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
9942                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
9943                                 SelectionDAGBuilder &Builder) {
9944   SelectionDAG &DAG = Builder.DAG;
9945   for (unsigned I = StartIdx; I < Call.arg_size(); I++) {
9946     SDValue Op = Builder.getValue(Call.getArgOperand(I));
9947 
9948     // Things on the stack are pointer-typed, meaning that they are already
9949     // legal and can be emitted directly to target nodes.
9950     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
9951       Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
9952     } else {
9953       // Otherwise emit a target independent node to be legalised.
9954       Ops.push_back(Builder.getValue(Call.getArgOperand(I)));
9955     }
9956   }
9957 }
9958 
9959 /// Lower llvm.experimental.stackmap.
9960 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
9961   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
9962   //                                  [live variables...])
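  //
  // For illustration (the operand values are hypothetical), a call might be:
  //   call void @llvm.experimental.stackmap(i64 12345, i32 8, ptr %obj, i64 %n)
  // i.e. stackmap ID 12345, an 8-byte nop shadow, and two live values to record.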
9963 
9964   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
9965 
9966   SDValue Chain, InGlue, Callee;
9967   SmallVector<SDValue, 32> Ops;
9968 
9969   SDLoc DL = getCurSDLoc();
9970   Callee = getValue(CI.getCalledOperand());
9971 
9972   // The stackmap intrinsic only records the live variables (the arguments
9973   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
9974   // intrinsic, this won't be lowered to a function call. This means we don't
9975   // have to worry about calling conventions and target specific lowering code.
9976   // Instead we perform the call lowering right here.
9977   //
9978   // chain, flag = CALLSEQ_START(chain, 0, 0)
9979   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
9980   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
9981   //
9982   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
9983   InGlue = Chain.getValue(1);
9984 
9985   // Add the STACKMAP operands, starting with DAG house-keeping.
9986   Ops.push_back(Chain);
9987   Ops.push_back(InGlue);
9988 
9989   // Add the <id>, <numShadowBytes> operands.
9990   //
9991   // These do not require legalisation, and can be emitted directly to target
9992   // constant nodes.
9993   SDValue ID = getValue(CI.getArgOperand(0));
9994   assert(ID.getValueType() == MVT::i64);
9995   SDValue IDConst = DAG.getTargetConstant(
9996       cast<ConstantSDNode>(ID)->getZExtValue(), DL, ID.getValueType());
9997   Ops.push_back(IDConst);
9998 
9999   SDValue Shad = getValue(CI.getArgOperand(1));
10000   assert(Shad.getValueType() == MVT::i32);
10001   SDValue ShadConst = DAG.getTargetConstant(
10002       cast<ConstantSDNode>(Shad)->getZExtValue(), DL, Shad.getValueType());
10003   Ops.push_back(ShadConst);
10004 
10005   // Add the live variables.
10006   addStackMapLiveVars(CI, 2, DL, Ops, *this);
10007 
10008   // Create the STACKMAP node.
10009   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10010   Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
10011   InGlue = Chain.getValue(1);
10012 
10013   Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
10014 
10015   // Stackmaps don't generate values, so nothing goes into the NodeMap.
10016 
10017   // Set the root to the target-lowered call chain.
10018   DAG.setRoot(Chain);
10019 
10020   // Inform the Frame Information that we have a stackmap in this function.
10021   FuncInfo.MF->getFrameInfo().setHasStackMap();
10022 }
10023 
10024 /// Lower llvm.experimental.patchpoint directly to its target opcode.
10025 void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
10026                                           const BasicBlock *EHPadBB) {
10027   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
10028   //                                                 i32 <numBytes>,
10029   //                                                 i8* <target>,
10030   //                                                 i32 <numArgs>,
10031   //                                                 [Args...],
10032   //                                                 [live variables...])
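  //
  // For illustration (the operand values are hypothetical), a call might be:
  //   call i64 @llvm.experimental.patchpoint.i64(i64 42, i32 15, ptr @target,
  //                                              i32 2, i64 %a, i64 %b, i64 %live)
  // i.e. ID 42, 15 patchable bytes, callee @target, two call arguments and one
  // extra live value recorded in the stack map.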
10033 
10034   CallingConv::ID CC = CB.getCallingConv();
10035   bool IsAnyRegCC = CC == CallingConv::AnyReg;
10036   bool HasDef = !CB.getType()->isVoidTy();
10037   SDLoc dl = getCurSDLoc();
10038   SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
10039 
10040   // Handle immediate and symbolic callees.
10041   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10042     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10043                                    /*isTarget=*/true);
10044   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10045     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10046                                          SDLoc(SymbolicCallee),
10047                                          SymbolicCallee->getValueType(0));
10048 
10049   // Get the real number of arguments participating in the call <numArgs>
10050   SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
10051   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
10052 
10053   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
10054   // Intrinsics include all meta-operands up to but not including CC.
10055   unsigned NumMetaOpers = PatchPointOpers::CCPos;
10056   assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
10057          "Not enough arguments provided to the patchpoint intrinsic");
10058 
10059   // For AnyRegCC the arguments are lowered later on manually.
10060   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10061   Type *ReturnTy =
10062       IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
10063 
10064   TargetLowering::CallLoweringInfo CLI(DAG);
10065   populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
10066                            ReturnTy, CB.getAttributes().getRetAttrs(), true);
10067   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
10068 
10069   SDNode *CallEnd = Result.second.getNode();
10070   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
10071     CallEnd = CallEnd->getOperand(0).getNode();
10072 
10073   /// Get a call instruction from the call sequence chain.
10074   /// Tail calls are not allowed.
10075   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
10076          "Expected a callseq node.");
10077   SDNode *Call = CallEnd->getOperand(0).getNode();
10078   bool HasGlue = Call->getGluedNode();
10079 
10080   // Replace the target specific call node with the patchable intrinsic.
10081   SmallVector<SDValue, 8> Ops;
10082 
10083   // Push the chain.
10084   Ops.push_back(*(Call->op_begin()));
10085 
10086   // Optionally, push the glue (if any).
10087   if (HasGlue)
10088     Ops.push_back(*(Call->op_end() - 1));
10089 
10090   // Push the register mask info.
10091   if (HasGlue)
10092     Ops.push_back(*(Call->op_end() - 2));
10093   else
10094     Ops.push_back(*(Call->op_end() - 1));
10095 
10096   // Add the <id> and <numBytes> constants.
10097   SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
10098   Ops.push_back(DAG.getTargetConstant(
10099                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
10100   SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
10101   Ops.push_back(DAG.getTargetConstant(
10102                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
10103                   MVT::i32));
10104 
10105   // Add the callee.
10106   Ops.push_back(Callee);
10107 
10108   // Adjust <numArgs> to account for any arguments that have been passed on the
10109   // stack instead.
10110   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
10111   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
10112   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10113   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10114 
10115   // Add the calling convention
10116   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
10117 
10118   // Add the arguments we omitted previously. The register allocator should
10119   // place these in any free register.
10120   if (IsAnyRegCC)
10121     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
10122       Ops.push_back(getValue(CB.getArgOperand(i)));
10123 
10124   // Push the arguments from the call instruction.
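        // Skip the chain and target (the first two operands) and stop before
        // the register mask and the optional glue operand.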
10125   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
10126   Ops.append(Call->op_begin() + 2, e);
10127 
10128   // Push live variables for the stack map.
10129   addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
10130 
10131   SDVTList NodeTys;
10132   if (IsAnyRegCC && HasDef) {
10133     // Create the return types based on the intrinsic definition
10134     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10135     SmallVector<EVT, 3> ValueVTs;
10136     ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
10137     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
10138 
10139     // There is always a chain and a glue type at the end
10140     ValueVTs.push_back(MVT::Other);
10141     ValueVTs.push_back(MVT::Glue);
10142     NodeTys = DAG.getVTList(ValueVTs);
10143   } else
10144     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10145 
10146   // Replace the target specific call node with a PATCHPOINT node.
10147   SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);
10148 
10149   // Update the NodeMap.
10150   if (HasDef) {
10151     if (IsAnyRegCC)
10152       setValue(&CB, SDValue(PPV.getNode(), 0));
10153     else
10154       setValue(&CB, Result.first);
10155   }
10156 
10157   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
10158   // call sequence. Furthermore the location of the chain and glue can change
10159   // when the AnyReg calling convention is used and the intrinsic returns a
10160   // value.
10161   if (IsAnyRegCC && HasDef) {
10162     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
10163     SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
10164     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10165   } else
10166     DAG.ReplaceAllUsesWith(Call, PPV.getNode());
10167   DAG.DeleteNode(Call);
10168 
10169   // Inform the Frame Information that we have a patchpoint in this function.
10170   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10171 }
10172 
10173 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
10174                                             unsigned Intrinsic) {
10175   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10176   SDValue Op1 = getValue(I.getArgOperand(0));
10177   SDValue Op2;
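        // Only the fadd/fmul reductions take a second operand (the scalar
        // start value); all other reductions are unary.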
10178   if (I.arg_size() > 1)
10179     Op2 = getValue(I.getArgOperand(1));
10180   SDLoc dl = getCurSDLoc();
10181   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
10182   SDValue Res;
10183   SDNodeFlags SDFlags;
10184   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
10185     SDFlags.copyFMF(*FPMO);
10186 
10187   switch (Intrinsic) {
10188   case Intrinsic::vector_reduce_fadd:
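          // When reassociation is allowed, use the relaxed VECREDUCE_FADD and
          // fold in the start value with a separate FADD; otherwise keep the
          // strictly ordered (sequential) form.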
10189     if (SDFlags.hasAllowReassociation())
10190       Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
10191                         DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10192                         SDFlags);
10193     else
10194       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10195     break;
10196   case Intrinsic::vector_reduce_fmul:
10197     if (SDFlags.hasAllowReassociation())
10198       Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
10199                         DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10200                         SDFlags);
10201     else
10202       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10203     break;
10204   case Intrinsic::vector_reduce_add:
10205     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10206     break;
10207   case Intrinsic::vector_reduce_mul:
10208     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10209     break;
10210   case Intrinsic::vector_reduce_and:
10211     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10212     break;
10213   case Intrinsic::vector_reduce_or:
10214     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10215     break;
10216   case Intrinsic::vector_reduce_xor:
10217     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10218     break;
10219   case Intrinsic::vector_reduce_smax:
10220     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10221     break;
10222   case Intrinsic::vector_reduce_smin:
10223     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10224     break;
10225   case Intrinsic::vector_reduce_umax:
10226     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10227     break;
10228   case Intrinsic::vector_reduce_umin:
10229     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10230     break;
10231   case Intrinsic::vector_reduce_fmax:
10232     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10233     break;
10234   case Intrinsic::vector_reduce_fmin:
10235     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10236     break;
10237   case Intrinsic::vector_reduce_fmaximum:
10238     Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
10239     break;
10240   case Intrinsic::vector_reduce_fminimum:
10241     Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
10242     break;
10243   default:
10244     llvm_unreachable("Unhandled vector reduce intrinsic");
10245   }
10246   setValue(&I, Res);
10247 }
10248 
10249 /// Returns an AttributeList representing the attributes applied to the return
10250 /// value of the given call.
10251 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
10252   SmallVector<Attribute::AttrKind, 2> Attrs;
10253   if (CLI.RetSExt)
10254     Attrs.push_back(Attribute::SExt);
10255   if (CLI.RetZExt)
10256     Attrs.push_back(Attribute::ZExt);
10257   if (CLI.IsInReg)
10258     Attrs.push_back(Attribute::InReg);
10259 
10260   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
10261                             Attrs);
10262 }
10263 
10264 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
10265 /// implementation, which just calls LowerCall.
10266 /// FIXME: When all targets are
10267 /// migrated to using LowerCall, this hook should be integrated into SDISel.
10268 std::pair<SDValue, SDValue>
10269 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
10270   // Handle the incoming return values from the call.
10271   CLI.Ins.clear();
10272   Type *OrigRetTy = CLI.RetTy;
10273   SmallVector<EVT, 4> RetTys;
10274   SmallVector<uint64_t, 4> Offsets;
10275   auto &DL = CLI.DAG.getDataLayout();
10276   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets, 0);
10277 
10278   if (CLI.IsPostTypeLegalization) {
10279     // If we are lowering a libcall after legalization, split the return type.
10280     SmallVector<EVT, 4> OldRetTys;
10281     SmallVector<uint64_t, 4> OldOffsets;
10282     RetTys.swap(OldRetTys);
10283     Offsets.swap(OldOffsets);
10284 
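          // Expand each original return VT into its legal register parts,
          // recording a byte offset for every part.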
10285     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
10286       EVT RetVT = OldRetTys[i];
10287       uint64_t Offset = OldOffsets[i];
10288       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
10289       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
10290       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
10291       RetTys.append(NumRegs, RegisterVT);
10292       for (unsigned j = 0; j != NumRegs; ++j)
10293         Offsets.push_back(Offset + j * RegisterVTByteSZ);
10294     }
10295   }
10296 
10297   SmallVector<ISD::OutputArg, 4> Outs;
10298   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
10299 
10300   bool CanLowerReturn =
10301       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
10302                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
10303 
10304   SDValue DemoteStackSlot;
10305   int DemoteStackIdx = -100;
10306   if (!CanLowerReturn) {
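          // sret demotion: create a stack slot in the caller and pass its
          // address as a hidden first argument; the result is loaded back from
          // the slot after the call.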
10307     // FIXME: equivalent assert?
10308     // assert(!CS.hasInAllocaArgument() &&
10309     //        "sret demotion is incompatible with inalloca");
10310     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
10311     Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
10312     MachineFunction &MF = CLI.DAG.getMachineFunction();
10313     DemoteStackIdx =
10314         MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
10315     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
10316                                               DL.getAllocaAddrSpace());
10317 
10318     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
10319     ArgListEntry Entry;
10320     Entry.Node = DemoteStackSlot;
10321     Entry.Ty = StackSlotPtrType;
10322     Entry.IsSExt = false;
10323     Entry.IsZExt = false;
10324     Entry.IsInReg = false;
10325     Entry.IsSRet = true;
10326     Entry.IsNest = false;
10327     Entry.IsByVal = false;
10328     Entry.IsByRef = false;
10329     Entry.IsReturned = false;
10330     Entry.IsSwiftSelf = false;
10331     Entry.IsSwiftAsync = false;
10332     Entry.IsSwiftError = false;
10333     Entry.IsCFGuardTarget = false;
10334     Entry.Alignment = Alignment;
10335     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
10336     CLI.NumFixedArgs += 1;
10337     CLI.getArgs()[0].IndirectType = CLI.RetTy;
10338     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
10339 
10340     // sret demotion isn't compatible with tail-calls, since the sret argument
10341     // points into the callers stack frame.
10342     CLI.IsTailCall = false;
10343   } else {
10344     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10345         CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
10346     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
10347       ISD::ArgFlagsTy Flags;
10348       if (NeedsRegBlock) {
10349         Flags.setInConsecutiveRegs();
10350         if (I == RetTys.size() - 1)
10351           Flags.setInConsecutiveRegsLast();
10352       }
10353       EVT VT = RetTys[I];
10354       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10355                                                      CLI.CallConv, VT);
10356       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10357                                                        CLI.CallConv, VT);
10358       for (unsigned i = 0; i != NumRegs; ++i) {
10359         ISD::InputArg MyFlags;
10360         MyFlags.Flags = Flags;
10361         MyFlags.VT = RegisterVT;
10362         MyFlags.ArgVT = VT;
10363         MyFlags.Used = CLI.IsReturnValueUsed;
10364         if (CLI.RetTy->isPointerTy()) {
10365           MyFlags.Flags.setPointer();
10366           MyFlags.Flags.setPointerAddrSpace(
10367               cast<PointerType>(CLI.RetTy)->getAddressSpace());
10368         }
10369         if (CLI.RetSExt)
10370           MyFlags.Flags.setSExt();
10371         if (CLI.RetZExt)
10372           MyFlags.Flags.setZExt();
10373         if (CLI.IsInReg)
10374           MyFlags.Flags.setInReg();
10375         CLI.Ins.push_back(MyFlags);
10376       }
10377     }
10378   }
10379 
10380   // We push in swifterror return as the last element of CLI.Ins.
10381   ArgListTy &Args = CLI.getArgs();
10382   if (supportSwiftError()) {
10383     for (const ArgListEntry &Arg : Args) {
10384       if (Arg.IsSwiftError) {
10385         ISD::InputArg MyFlags;
10386         MyFlags.VT = getPointerTy(DL);
10387         MyFlags.ArgVT = EVT(getPointerTy(DL));
10388         MyFlags.Flags.setSwiftError();
10389         CLI.Ins.push_back(MyFlags);
10390       }
10391     }
10392   }
10393 
10394   // Handle all of the outgoing arguments.
10395   CLI.Outs.clear();
10396   CLI.OutVals.clear();
10397   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
10398     SmallVector<EVT, 4> ValueVTs;
10399     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
10400     // FIXME: Split arguments if CLI.IsPostTypeLegalization
10401     Type *FinalType = Args[i].Ty;
10402     if (Args[i].IsByVal)
10403       FinalType = Args[i].IndirectType;
10404     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10405         FinalType, CLI.CallConv, CLI.IsVarArg, DL);
10406     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
10407          ++Value) {
10408       EVT VT = ValueVTs[Value];
10409       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
10410       SDValue Op = SDValue(Args[i].Node.getNode(),
10411                            Args[i].Node.getResNo() + Value);
10412       ISD::ArgFlagsTy Flags;
10413 
10414       // Certain targets (such as MIPS), may have a different ABI alignment
10415       // for a type depending on the context. Give the target a chance to
10416       // specify the alignment it wants.
10417       const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
10418       Flags.setOrigAlign(OriginalAlignment);
10419 
10420       if (Args[i].Ty->isPointerTy()) {
10421         Flags.setPointer();
10422         Flags.setPointerAddrSpace(
10423             cast<PointerType>(Args[i].Ty)->getAddressSpace());
10424       }
10425       if (Args[i].IsZExt)
10426         Flags.setZExt();
10427       if (Args[i].IsSExt)
10428         Flags.setSExt();
10429       if (Args[i].IsInReg) {
10430         // Under the vectorcall calling convention, a structure passed
10431         // InReg is surely an HVA (homogeneous vector aggregate).
10432         if (CLI.CallConv == CallingConv::X86_VectorCall &&
10433             isa<StructType>(FinalType)) {
10434           // The first value of a structure is marked as the HVA start.
10435           if (0 == Value)
10436             Flags.setHvaStart();
10437           Flags.setHva();
10438         }
10439         // Set InReg Flag
10440         Flags.setInReg();
10441       }
10442       if (Args[i].IsSRet)
10443         Flags.setSRet();
10444       if (Args[i].IsSwiftSelf)
10445         Flags.setSwiftSelf();
10446       if (Args[i].IsSwiftAsync)
10447         Flags.setSwiftAsync();
10448       if (Args[i].IsSwiftError)
10449         Flags.setSwiftError();
10450       if (Args[i].IsCFGuardTarget)
10451         Flags.setCFGuardTarget();
10452       if (Args[i].IsByVal)
10453         Flags.setByVal();
10454       if (Args[i].IsByRef)
10455         Flags.setByRef();
10456       if (Args[i].IsPreallocated) {
10457         Flags.setPreallocated();
10458         // Set the byval flag for CCAssignFn callbacks that don't know about
10459         // preallocated.  This way we can know how many bytes we should've
10460         // allocated and how many bytes a callee cleanup function will pop.  If
10461         // we port preallocated to more targets, we'll have to add custom
10462         // preallocated handling in the various CC lowering callbacks.
10463         Flags.setByVal();
10464       }
10465       if (Args[i].IsInAlloca) {
10466         Flags.setInAlloca();
10467         // Set the byval flag for CCAssignFn callbacks that don't know about
10468         // inalloca.  This way we can know how many bytes we should've allocated
10469         // and how many bytes a callee cleanup function will pop.  If we port
10470         // inalloca to more targets, we'll have to add custom inalloca handling
10471         // in the various CC lowering callbacks.
10472         Flags.setByVal();
10473       }
10474       Align MemAlign;
10475       if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
10476         unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
10477         Flags.setByValSize(FrameSize);
10478 
10479         // Alignment should come from the FE. The BE will guess if this
              // info is not there but there are cases it cannot get right.
10480         if (auto MA = Args[i].Alignment)
10481           MemAlign = *MA;
10482         else
10483           MemAlign = Align(getByValTypeAlignment(Args[i].IndirectType, DL));
10484       } else if (auto MA = Args[i].Alignment) {
10485         MemAlign = *MA;
10486       } else {
10487         MemAlign = OriginalAlignment;
10488       }
10489       Flags.setMemAlign(MemAlign);
10490       if (Args[i].IsNest)
10491         Flags.setNest();
10492       if (NeedsRegBlock)
10493         Flags.setInConsecutiveRegs();
10494 
10495       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10496                                                  CLI.CallConv, VT);
10497       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10498                                                         CLI.CallConv, VT);
10499       SmallVector<SDValue, 4> Parts(NumParts);
10500       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
10501 
10502       if (Args[i].IsSExt)
10503         ExtendKind = ISD::SIGN_EXTEND;
10504       else if (Args[i].IsZExt)
10505         ExtendKind = ISD::ZERO_EXTEND;
10506 
10507       // Conservatively only handle 'returned' on non-vectors that can be lowered,
10508       // for now.
10509       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
10510           CanLowerReturn) {
10511         assert((CLI.RetTy == Args[i].Ty ||
10512                 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
10513                  CLI.RetTy->getPointerAddressSpace() ==
10514                      Args[i].Ty->getPointerAddressSpace())) &&
10515                RetTys.size() == NumValues && "unexpected use of 'returned'");
10516         // Before passing 'returned' to the target lowering code, ensure that
10517         // either the register MVT and the actual EVT are the same size or that
10518         // the return value and argument are extended in the same way; in these
10519         // cases it's safe to pass the argument register value unchanged as the
10520         // return register value (although it's at the target's option whether
10521         // to do so)
10522         // TODO: allow code generation to take advantage of partially preserved
10523         // registers rather than clobbering the entire register when the
10524         // parameter extension method is not compatible with the return
10525         // extension method
10526         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
10527             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
10528              CLI.RetZExt == Args[i].IsZExt))
10529           Flags.setReturned();
10530       }
10531 
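            // Split the argument value into NumParts legal register-sized
            // pieces, applying the extension kind chosen above.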
10532       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
10533                      CLI.CallConv, ExtendKind);
10534 
10535       for (unsigned j = 0; j != NumParts; ++j) {
10536         // If it isn't the first piece, the alignment must be 1.
10537         // For scalable vectors the scalable part is currently handled
10538         // by individual targets, so we just use the known minimum size here.
10539         ISD::OutputArg MyFlags(
10540             Flags, Parts[j].getValueType().getSimpleVT(), VT,
10541             i < CLI.NumFixedArgs, i,
10542             j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
10543         if (NumParts > 1 && j == 0)
10544           MyFlags.Flags.setSplit();
10545         else if (j != 0) {
10546           MyFlags.Flags.setOrigAlign(Align(1));
10547           if (j == NumParts - 1)
10548             MyFlags.Flags.setSplitEnd();
10549         }
10550 
10551         CLI.Outs.push_back(MyFlags);
10552         CLI.OutVals.push_back(Parts[j]);
10553       }
10554 
10555       if (NeedsRegBlock && Value == NumValues - 1)
10556         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
10557     }
10558   }
10559 
10560   SmallVector<SDValue, 4> InVals;
10561   CLI.Chain = LowerCall(CLI, InVals);
10562 
10563   // Update CLI.InVals to use outside of this function.
10564   CLI.InVals = InVals;
10565 
10566   // Verify that the target's LowerCall behaved as expected.
10567   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
10568          "LowerCall didn't return a valid chain!");
10569   assert((!CLI.IsTailCall || InVals.empty()) &&
10570          "LowerCall emitted a return value for a tail call!");
10571   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
10572          "LowerCall didn't emit the correct number of values!");
10573 
10574   // For a tail call, the return value is merely live-out and there aren't
10575   // any nodes in the DAG representing it. Return a special value to
10576   // indicate that a tail call has been emitted and no more Instructions
10577   // should be processed in the current block.
10578   if (CLI.IsTailCall) {
10579     CLI.DAG.setRoot(CLI.Chain);
10580     return std::make_pair(SDValue(), SDValue());
10581   }
10582 
10583 #ifndef NDEBUG
10584   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
10585     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
10586     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
10587            "LowerCall emitted a value with the wrong type!");
10588   }
10589 #endif
10590 
10591   SmallVector<SDValue, 4> ReturnValues;
10592   if (!CanLowerReturn) {
10593     // The instruction result is the result of loading from the
10594     // hidden sret parameter.
10595     SmallVector<EVT, 1> PVTs;
10596     Type *PtrRetTy =
10597         PointerType::get(OrigRetTy->getContext(), DL.getAllocaAddrSpace());
10598 
10599     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
10600     assert(PVTs.size() == 1 && "Pointers should fit in one register");
10601     EVT PtrVT = PVTs[0];
10602 
10603     unsigned NumValues = RetTys.size();
10604     ReturnValues.resize(NumValues);
10605     SmallVector<SDValue, 4> Chains(NumValues);
10606 
10607     // An aggregate return value cannot wrap around the address space, so
10608     // offsets to its parts don't wrap either.
10609     SDNodeFlags Flags;
10610     Flags.setNoUnsignedWrap(true);
10611 
10612     MachineFunction &MF = CLI.DAG.getMachineFunction();
10613     Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
10614     for (unsigned i = 0; i < NumValues; ++i) {
10615       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
10616                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
10617                                                         PtrVT), Flags);
10618       SDValue L = CLI.DAG.getLoad(
10619           RetTys[i], CLI.DL, CLI.Chain, Add,
10620           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
10621                                             DemoteStackIdx, Offsets[i]),
10622           HiddenSRetAlign);
10623       ReturnValues[i] = L;
10624       Chains[i] = L.getValue(1);
10625     }
10626 
10627     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
10628   } else {
10629     // Collect the legal value parts into potentially illegal values
10630     // that correspond to the original function's return values.
10631     std::optional<ISD::NodeType> AssertOp;
10632     if (CLI.RetSExt)
10633       AssertOp = ISD::AssertSext;
10634     else if (CLI.RetZExt)
10635       AssertOp = ISD::AssertZext;
10636     unsigned CurReg = 0;
10637     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
10638       EVT VT = RetTys[I];
10639       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10640                                                      CLI.CallConv, VT);
10641       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10642                                                        CLI.CallConv, VT);
10643 
10644       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
10645                                               NumRegs, RegisterVT, VT, nullptr,
10646                                               CLI.CallConv, AssertOp));
10647       CurReg += NumRegs;
10648     }
10649 
10650     // For a function returning void, there is no return value. We can't create
10651     // such a node, so we just return a null return value in that case. In
10652     // that case, nothing will actually look at the value.
10653     if (ReturnValues.empty())
10654       return std::make_pair(SDValue(), CLI.Chain);
10655   }
10656 
10657   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
10658                                 CLI.DAG.getVTList(RetTys), ReturnValues);
10659   return std::make_pair(Res, CLI.Chain);
10660 }
10661 
10662 /// Places new result values for the node in Results (their number
10663 /// and types must exactly match those of the original return values of
10664 /// the node), or leaves Results empty, which indicates that the node is not
10665 /// to be custom lowered after all.
10666 void TargetLowering::LowerOperationWrapper(SDNode *N,
10667                                            SmallVectorImpl<SDValue> &Results,
10668                                            SelectionDAG &DAG) const {
10669   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
10670 
10671   if (!Res.getNode())
10672     return;
10673 
10674   // If the original node has one result, take the return value from
10675   // LowerOperation as is. It might not be result number 0.
10676   if (N->getNumValues() == 1) {
10677     Results.push_back(Res);
10678     return;
10679   }
10680 
10681   // If the original node has multiple results, then the return node should
10682   // have the same number of results.
10683   assert((N->getNumValues() == Res->getNumValues()) &&
10684       "Lowering returned the wrong number of results!");
10685 
10686   // Place the new result values based on N's result numbers.
10687   for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
10688     Results.push_back(Res.getValue(I));
10689 }
10690 
10691 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10692   llvm_unreachable("LowerOperation not implemented for this target!");
10693 }
10694 
10695 void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V,
10696                                                      unsigned Reg,
10697                                                      ISD::NodeType ExtendType) {
10698   SDValue Op = getNonRegisterValue(V);
10699   assert((Op.getOpcode() != ISD::CopyFromReg ||
10700           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
10701          "Copy from a reg to the same reg!");
10702   assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
10703 
10704   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10705   // If this is an InlineAsm we have to match the registers required, not the
10706   // notional registers required by the type.
10707 
10708   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
10709                    std::nullopt); // This is not an ABI copy.
10710   SDValue Chain = DAG.getEntryNode();
10711 
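        // If the caller did not request a specific extension, use the one
        // recorded for V in PreferredExtendType, if any.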
10712   if (ExtendType == ISD::ANY_EXTEND) {
10713     auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
10714     if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
10715       ExtendType = PreferredExtendIt->second;
10716   }
10717   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
10718   PendingExports.push_back(Chain);
10719 }
10720 
10721 #include "llvm/CodeGen/SelectionDAGISel.h"
10722 
10723 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
10724 /// entry block, return true.  This includes arguments used by switches, since
10725 /// the switch may expand into multiple basic blocks.
10726 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
10727   // With FastISel active, we may be splitting blocks, so force creation
10728   // of virtual registers for all non-dead arguments.
10729   if (FastISel)
10730     return A->use_empty();
10731 
10732   const BasicBlock &Entry = A->getParent()->front();
10733   for (const User *U : A->users())
10734     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
10735       return false;  // Use not in entry block.
10736 
10737   return true;
10738 }
10739 
10740 using ArgCopyElisionMapTy =
10741     DenseMap<const Argument *,
10742              std::pair<const AllocaInst *, const StoreInst *>>;
10743 
10744 /// Scan the entry block of the function in FuncInfo for arguments that look
10745 /// like copies into a local alloca. Record any copied arguments in
10746 /// ArgCopyElisionCandidates.
10747 static void
10748 findArgumentCopyElisionCandidates(const DataLayout &DL,
10749                                   FunctionLoweringInfo *FuncInfo,
10750                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
10751   // Record the state of every static alloca used in the entry block. Argument
10752   // allocas are all used in the entry block, so we need approximately as many
10753   // entries as we have arguments.
10754   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
10755   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
10756   unsigned NumArgs = FuncInfo->Fn->arg_size();
10757   StaticAllocas.reserve(NumArgs * 2);
10758 
10759   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
10760     if (!V)
10761       return nullptr;
10762     V = V->stripPointerCasts();
10763     const auto *AI = dyn_cast<AllocaInst>(V);
10764     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
10765       return nullptr;
10766     auto Iter = StaticAllocas.insert({AI, Unknown});
10767     return &Iter.first->second;
10768   };
10769 
10770   // Look for stores of arguments to static allocas. Look through bitcasts and
10771   // GEPs to handle type coercions, as long as the alloca is fully initialized
10772   // by the store. Any non-store use of an alloca escapes it and any subsequent
10773   // unanalyzed store might write it.
10774   // FIXME: Handle structs initialized with multiple stores.
10775   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
10776     // Look for stores, and handle non-store uses conservatively.
10777     const auto *SI = dyn_cast<StoreInst>(&I);
10778     if (!SI) {
10779       // We will look through cast uses, so ignore them completely.
10780       if (I.isCast())
10781         continue;
10782       // Ignore debug info and pseudo op intrinsics, they don't escape or store
10783       // to allocas.
10784       if (I.isDebugOrPseudoInst())
10785         continue;
10786       // This is an unknown instruction. Assume it escapes or writes to all
10787       // static alloca operands.
10788       for (const Use &U : I.operands()) {
10789         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
10790           *Info = StaticAllocaInfo::Clobbered;
10791       }
10792       continue;
10793     }
10794 
10795     // If the stored value is a static alloca, mark it as escaped.
10796     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
10797       *Info = StaticAllocaInfo::Clobbered;
10798 
10799     // Check if the destination is a static alloca.
10800     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
10801     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
10802     if (!Info)
10803       continue;
10804     const AllocaInst *AI = cast<AllocaInst>(Dst);
10805 
10806     // Skip allocas that have been initialized or clobbered.
10807     if (*Info != StaticAllocaInfo::Unknown)
10808       continue;
10809 
10810     // Check if the stored value is an argument, and that this store fully
10811     // initializes the alloca.
10812     // If the argument type has padding bits we can't directly forward a pointer
10813     // as the upper bits may contain garbage.
10814     // Don't elide copies from the same argument twice.
10815     const Value *Val = SI->getValueOperand()->stripPointerCasts();
10816     const auto *Arg = dyn_cast<Argument>(Val);
10817     if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
10818         Arg->getType()->isEmptyTy() ||
10819         DL.getTypeStoreSize(Arg->getType()) !=
10820             DL.getTypeAllocSize(AI->getAllocatedType()) ||
10821         !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
10822         ArgCopyElisionCandidates.count(Arg)) {
10823       *Info = StaticAllocaInfo::Clobbered;
10824       continue;
10825     }
10826 
10827     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
10828                       << '\n');
10829 
10830     // Mark this alloca and store for argument copy elision.
10831     *Info = StaticAllocaInfo::Elidable;
10832     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
10833 
10834     // Stop scanning if we've seen all arguments. This will happen early in -O0
10835     // builds, which is useful, because -O0 builds have large entry blocks and
10836     // many allocas.
10837     if (ArgCopyElisionCandidates.size() == NumArgs)
10838       break;
10839   }
10840 }
10841 
10842 /// Try to elide argument copies from memory into a local alloca. Succeeds if
10843 /// ArgVal is a load from a suitable fixed stack object.
10844 static void tryToElideArgumentCopy(
10845     FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
10846     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
10847     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
10848     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
10849     ArrayRef<SDValue> ArgVals, bool &ArgHasUses) {
10850   // Check if this is a load from a fixed stack object.
10851   auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
10852   if (!LNode)
10853     return;
10854   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
10855   if (!FINode)
10856     return;
10857 
10858   // Check that the fixed stack object is the right size and alignment.
10859   // Look at the alignment that the user wrote on the alloca instead of looking
10860   // at the stack object.
10861   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
10862   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
10863   const AllocaInst *AI = ArgCopyIter->second.first;
10864   int FixedIndex = FINode->getIndex();
10865   int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
10866   int OldIndex = AllocaIndex;
10867   MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
10868   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
10869     LLVM_DEBUG(
10870         dbgs() << "  argument copy elision failed due to bad fixed stack "
10871                   "object size\n");
10872     return;
10873   }
10874   Align RequiredAlignment = AI->getAlign();
10875   if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
10876     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
10877                          "greater than stack argument alignment ("
10878                       << DebugStr(RequiredAlignment) << " vs "
10879                       << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
10880     return;
10881   }
10882 
10883   // Perform the elision. Delete the old stack object and replace its only use
10884   // in the variable info map. Mark the stack object as mutable.
10885   LLVM_DEBUG({
10886     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
10887            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
10888            << '\n';
10889   });
10890   MFI.RemoveStackObject(OldIndex);
10891   MFI.setIsImmutableObjectIndex(FixedIndex, false);
10892   AllocaIndex = FixedIndex;
10893   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
10894   for (SDValue ArgVal : ArgVals)
10895     Chains.push_back(ArgVal.getValue(1));
10896 
10897   // Avoid emitting code for the store implementing the copy.
10898   const StoreInst *SI = ArgCopyIter->second.second;
10899   ElidedArgCopyInstrs.insert(SI);
10900 
10901   // Check for uses of the argument again so that we can avoid exporting ArgVal
10902   // if it is't used by anything other than the store.
10903   // if it isn't used by anything other than the store.
10904     if (U != SI) {
10905       ArgHasUses = true;
10906       break;
10907     }
10908   }
10909 }
10910 
10911 void SelectionDAGISel::LowerArguments(const Function &F) {
10912   SelectionDAG &DAG = SDB->DAG;
10913   SDLoc dl = SDB->getCurSDLoc();
10914   const DataLayout &DL = DAG.getDataLayout();
10915   SmallVector<ISD::InputArg, 16> Ins;
10916 
10917   // In Naked functions we aren't going to save any registers.
10918   if (F.hasFnAttribute(Attribute::Naked))
10919     return;
10920 
10921   if (!FuncInfo->CanLowerReturn) {
10922     // Put in an sret pointer parameter before all the other parameters.
10923     SmallVector<EVT, 1> ValueVTs;
10924     ComputeValueVTs(*TLI, DAG.getDataLayout(),
10925                     PointerType::get(F.getContext(),
10926                                      DAG.getDataLayout().getAllocaAddrSpace()),
10927                     ValueVTs);
10928 
10929     // NOTE: Assuming that a pointer will never break down to more than one VT
10930     // or one register.
10931     ISD::ArgFlagsTy Flags;
10932     Flags.setSRet();
10933     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
10934     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
10935                          ISD::InputArg::NoArgIndex, 0);
10936     Ins.push_back(RetArg);
10937   }
10938 
10939   // Look for stores of arguments to static allocas. Mark such arguments with a
10940   // flag to ask the target to give us the memory location of that argument if
10941   // available.
10942   ArgCopyElisionMapTy ArgCopyElisionCandidates;
10943   findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
10944                                     ArgCopyElisionCandidates);
10945 
10946   // Set up the incoming argument description vector.
10947   for (const Argument &Arg : F.args()) {
10948     unsigned ArgNo = Arg.getArgNo();
10949     SmallVector<EVT, 4> ValueVTs;
10950     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
10951     bool isArgValueUsed = !Arg.use_empty();
10952     unsigned PartBase = 0;
10953     Type *FinalType = Arg.getType();
10954     if (Arg.hasAttribute(Attribute::ByVal))
10955       FinalType = Arg.getParamByValType();
10956     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
10957         FinalType, F.getCallingConv(), F.isVarArg(), DL);
10958     for (unsigned Value = 0, NumValues = ValueVTs.size();
10959          Value != NumValues; ++Value) {
10960       EVT VT = ValueVTs[Value];
10961       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
10962       ISD::ArgFlagsTy Flags;
10963 
10964 
10965       if (Arg.getType()->isPointerTy()) {
10966         Flags.setPointer();
10967         Flags.setPointerAddrSpace(
10968             cast<PointerType>(Arg.getType())->getAddressSpace());
10969       }
10970       if (Arg.hasAttribute(Attribute::ZExt))
10971         Flags.setZExt();
10972       if (Arg.hasAttribute(Attribute::SExt))
10973         Flags.setSExt();
10974       if (Arg.hasAttribute(Attribute::InReg)) {
10975         // Under the vectorcall calling convention, a structure passed
10976         // InReg is surely an HVA (homogeneous vector aggregate).
10977         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
10978             isa<StructType>(Arg.getType())) {
10979           // The first value of a structure is marked
10980           // The first value of a structure is marked as the HVA start.
10981             Flags.setHvaStart();
10982           Flags.setHva();
10983         }
10984         // Set InReg Flag
10985         Flags.setInReg();
10986       }
10987       if (Arg.hasAttribute(Attribute::StructRet))
10988         Flags.setSRet();
10989       if (Arg.hasAttribute(Attribute::SwiftSelf))
10990         Flags.setSwiftSelf();
10991       if (Arg.hasAttribute(Attribute::SwiftAsync))
10992         Flags.setSwiftAsync();
10993       if (Arg.hasAttribute(Attribute::SwiftError))
10994         Flags.setSwiftError();
10995       if (Arg.hasAttribute(Attribute::ByVal))
10996         Flags.setByVal();
10997       if (Arg.hasAttribute(Attribute::ByRef))
10998         Flags.setByRef();
10999       if (Arg.hasAttribute(Attribute::InAlloca)) {
11000         Flags.setInAlloca();
11001         // Set the byval flag for CCAssignFn callbacks that don't know about
11002         // inalloca.  This way we can know how many bytes we should've allocated
11003         // and how many bytes a callee cleanup function will pop.  If we port
11004         // inalloca to more targets, we'll have to add custom inalloca handling
11005         // in the various CC lowering callbacks.
11006         Flags.setByVal();
11007       }
11008       if (Arg.hasAttribute(Attribute::Preallocated)) {
11009         Flags.setPreallocated();
11010         // Set the byval flag for CCAssignFn callbacks that don't know about
11011         // preallocated.  This way we can know how many bytes we should've
11012         // allocated and how many bytes a callee cleanup function will pop.  If
11013         // we port preallocated to more targets, we'll have to add custom
11014         // preallocated handling in the various CC lowering callbacks.
11015         Flags.setByVal();
11016       }
11017 
11018       // Certain targets (such as MIPS), may have a different ABI alignment
11019       // for a type depending on the context. Give the target a chance to
11020       // specify the alignment it wants.
11021       const Align OriginalAlignment(
11022           TLI->getABIAlignmentForCallingConv(ArgTy, DL));
11023       Flags.setOrigAlign(OriginalAlignment);
11024 
11025       Align MemAlign;
11026       Type *ArgMemTy = nullptr;
11027       if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
11028           Flags.isByRef()) {
11029         if (!ArgMemTy)
11030           ArgMemTy = Arg.getPointeeInMemoryValueType();
11031 
11032         uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
11033 
11034         // For in-memory arguments, size and alignment should be passed from FE.
11035         // BE will guess if this info is not there but there are cases it cannot
11036         // get right.
11037         if (auto ParamAlign = Arg.getParamStackAlign())
11038           MemAlign = *ParamAlign;
11039         else if ((ParamAlign = Arg.getParamAlign()))
11040           MemAlign = *ParamAlign;
11041         else
11042           MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
11043         if (Flags.isByRef())
11044           Flags.setByRefSize(MemSize);
11045         else
11046           Flags.setByValSize(MemSize);
11047       } else if (auto ParamAlign = Arg.getParamStackAlign()) {
11048         MemAlign = *ParamAlign;
11049       } else {
11050         MemAlign = OriginalAlignment;
11051       }
11052       Flags.setMemAlign(MemAlign);
11053 
11054       if (Arg.hasAttribute(Attribute::Nest))
11055         Flags.setNest();
11056       if (NeedsRegBlock)
11057         Flags.setInConsecutiveRegs();
11058       if (ArgCopyElisionCandidates.count(&Arg))
11059         Flags.setCopyElisionCandidate();
11060       if (Arg.hasAttribute(Attribute::Returned))
11061         Flags.setReturned();
11062 
11063       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
11064           *CurDAG->getContext(), F.getCallingConv(), VT);
11065       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
11066           *CurDAG->getContext(), F.getCallingConv(), VT);
11067       for (unsigned i = 0; i != NumRegs; ++i) {
11068         // For scalable vectors, use the minimum size; individual targets
11069         // are responsible for handling scalable vector arguments and
11070         // return values.
11071         ISD::InputArg MyFlags(
11072             Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11073             PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
11074         if (NumRegs > 1 && i == 0)
11075           MyFlags.Flags.setSplit();
11076         // If it isn't the first piece, the alignment must be 1.
11077         else if (i > 0) {
11078           MyFlags.Flags.setOrigAlign(Align(1));
11079           if (i == NumRegs - 1)
11080             MyFlags.Flags.setSplitEnd();
11081         }
11082         Ins.push_back(MyFlags);
11083       }
11084       if (NeedsRegBlock && Value == NumValues - 1)
11085         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11086       PartBase += VT.getStoreSize().getKnownMinValue();
11087     }
11088   }
11089 
11090   // Call the target to set up the argument values.
11091   SmallVector<SDValue, 8> InVals;
11092   SDValue NewRoot = TLI->LowerFormalArguments(
11093       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
11094 
11095   // Verify that the target's LowerFormalArguments behaved as expected.
11096   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
11097          "LowerFormalArguments didn't return a valid chain!");
11098   assert(InVals.size() == Ins.size() &&
11099          "LowerFormalArguments didn't emit the correct number of values!");
11100   LLVM_DEBUG({
11101     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
11102       assert(InVals[i].getNode() &&
11103              "LowerFormalArguments emitted a null value!");
11104       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11105              "LowerFormalArguments emitted a value with the wrong type!");
11106     }
11107   });
11108 
11109   // Update the DAG with the new chain value resulting from argument lowering.
11110   DAG.setRoot(NewRoot);
11111 
11112   // Set up the argument values.
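        // i indexes the flattened InVals array; each lowered value below
        // advances it by the number of register parts it consumed.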
11113   unsigned i = 0;
11114   if (!FuncInfo->CanLowerReturn) {
11115     // Create a virtual register for the sret pointer, and put in a copy
11116     // from the sret argument into it.
11117     SmallVector<EVT, 1> ValueVTs;
11118     ComputeValueVTs(*TLI, DAG.getDataLayout(),
11119                     PointerType::get(F.getContext(),
11120                                      DAG.getDataLayout().getAllocaAddrSpace()),
11121                     ValueVTs);
11122     MVT VT = ValueVTs[0].getSimpleVT();
11123     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
11124     std::optional<ISD::NodeType> AssertOp;
11125     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
11126                                         nullptr, F.getCallingConv(), AssertOp);
11127 
11128     MachineFunction& MF = SDB->DAG.getMachineFunction();
11129     MachineRegisterInfo& RegInfo = MF.getRegInfo();
11130     Register SRetReg =
11131         RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
11132     FuncInfo->DemoteRegister = SRetReg;
11133     NewRoot =
11134         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
11135     DAG.setRoot(NewRoot);
11136 
11137     // i indexes lowered arguments.  Bump it past the hidden sret argument.
11138     ++i;
11139   }
11140 
11141   SmallVector<SDValue, 4> Chains;
11142   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11143   for (const Argument &Arg : F.args()) {
11144     SmallVector<SDValue, 4> ArgValues;
11145     SmallVector<EVT, 4> ValueVTs;
11146     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11147     unsigned NumValues = ValueVTs.size();
11148     if (NumValues == 0)
11149       continue;
11150 
11151     bool ArgHasUses = !Arg.use_empty();
11152 
11153     // Elide the copying store if the target loaded this argument from a
11154     // suitable fixed stack object.
11155     if (Ins[i].Flags.isCopyElisionCandidate()) {
11156       unsigned NumParts = 0;
11157       for (EVT VT : ValueVTs)
11158         NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
11159                                                        F.getCallingConv(), VT);
11160 
11161       tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
11162                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
11163                              ArrayRef(&InVals[i], NumParts), ArgHasUses);
11164     }
11165 
11166     // If this argument is unused then remember its value. It is used to generate
11167     // debugging information.
11168     bool isSwiftErrorArg =
11169         TLI->supportSwiftError() &&
11170         Arg.hasAttribute(Attribute::SwiftError);
11171     if (!ArgHasUses && !isSwiftErrorArg) {
11172       SDB->setUnusedArgValue(&Arg, InVals[i]);
11173 
11174       // Also remember any frame index for use in FastISel.
11175       if (FrameIndexSDNode *FI =
11176           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11177         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11178     }
11179 
11180     for (unsigned Val = 0; Val != NumValues; ++Val) {
11181       EVT VT = ValueVTs[Val];
11182       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
11183                                                       F.getCallingConv(), VT);
11184       unsigned NumParts = TLI->getNumRegistersForCallingConv(
11185           *CurDAG->getContext(), F.getCallingConv(), VT);
11186 
11187       // Even an apparent 'unused' swifterror argument needs to be returned. So
11188       // we do generate a copy for it that can be used on return from the
11189       // function.
11190       if (ArgHasUses || isSwiftErrorArg) {
11191         std::optional<ISD::NodeType> AssertOp;
11192         if (Arg.hasAttribute(Attribute::SExt))
11193           AssertOp = ISD::AssertSext;
11194         else if (Arg.hasAttribute(Attribute::ZExt))
11195           AssertOp = ISD::AssertZext;
11196 
11197         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
11198                                              PartVT, VT, nullptr,
11199                                              F.getCallingConv(), AssertOp));
11200       }
11201 
11202       i += NumParts;
11203     }
11204 
11205     // We don't need to do anything else for unused arguments.
11206     if (ArgValues.empty())
11207       continue;
11208 
11209     // Note down frame index.
11210     if (FrameIndexSDNode *FI =
11211         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11212       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11213 
11214     SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
11215                                      SDB->getCurSDLoc());
11216 
11217     SDB->setValue(&Arg, Res);
11218     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
11219       // We want to associate the argument with the frame index, among
11220       // involved operands, that correspond to the lowest address. The
11221       // getCopyFromParts function, called earlier, is swapping the order of
11222       // the operands to BUILD_PAIR depending on endianness. The result of
11223       // that swapping is that the least significant bits of the argument will
11224       // be in the first operand of the BUILD_PAIR node, and the most
11225       // significant bits will be in the second operand.
11226       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
11227       if (LoadSDNode *LNode =
11228           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
11229         if (FrameIndexSDNode *FI =
11230             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11231           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11232     }
11233 
11234     // Analyses past this point are naive and don't expect an assertion.
11235     if (Res.getOpcode() == ISD::AssertZext)
11236       Res = Res.getOperand(0);
11237 
11238     // Update the SwiftErrorVRegDefMap.
11239     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
11240       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11241       if (Register::isVirtualRegister(Reg))
11242         SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
11243                                    Reg);
11244     }
11245 
11246     // If this argument is live outside of the entry block, insert a copy from
11247     // wherever we got it to the vreg that other BB's will reference it as.
11248     if (Res.getOpcode() == ISD::CopyFromReg) {
11249       // If we can, though, try to skip creating an unnecessary vreg.
11250       // FIXME: This isn't very clean... it would be nice to make this more
11251       // general.
11252       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11253       if (Register::isVirtualRegister(Reg)) {
11254         FuncInfo->ValueMap[&Arg] = Reg;
11255         continue;
11256       }
11257     }
11258     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
11259       FuncInfo->InitializeRegForValue(&Arg);
11260       SDB->CopyToExportRegsIfNeeded(&Arg);
11261     }
11262   }
11263 
11264   if (!Chains.empty()) {
11265     Chains.push_back(NewRoot);
11266     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
11267   }
11268 
11269   DAG.setRoot(NewRoot);
11270 
11271   assert(i == InVals.size() && "Argument register count mismatch!");
11272 
11273   // If any argument copy elisions occurred and we have debug info, update the
11274   // stale frame indices used in the dbg.declare variable info table.
11275   if (!ArgCopyElisionFrameIndexMap.empty()) {
11276     for (MachineFunction::VariableDbgInfo &VI :
11277          MF->getInStackSlotVariableDbgInfo()) {
11278       auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
11279       if (I != ArgCopyElisionFrameIndexMap.end())
11280         VI.updateStackSlot(I->second);
11281     }
11282   }
11283 
11284   // Finally, if the target has anything special to do, allow it to do so.
11285   emitFunctionEntryCode();
11286 }
11287 
11288 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
11289 /// ensure constants are generated when needed.  Remember the virtual registers
11290 /// that need to be added to the Machine PHI nodes as input.  We cannot just
11291 /// directly add them, because expansion might result in multiple MBB's for one
11292 /// BB.  As such, the start of the BB might correspond to a different MBB than
11293 /// the end.
11294 void
11295 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
11296   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11297 
11298   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
11299 
11300   // Check PHI nodes in successors that expect a value to be available from this
11301   // block.
11302   for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
11303     if (!isa<PHINode>(SuccBB->begin())) continue;
11304     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
11305 
11306     // If this terminator has multiple identical successors (common for
11307     // switches), only handle each succ once.
11308     if (!SuccsHandled.insert(SuccMBB).second)
11309       continue;
11310 
11311     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
11312 
11313     // At this point we know that there is a 1-1 correspondence between LLVM PHI
11314     // nodes and Machine PHI nodes, but the incoming operands have not been
11315     // emitted yet.
11316     for (const PHINode &PN : SuccBB->phis()) {
11317       // Ignore dead PHIs.
11318       if (PN.use_empty())
11319         continue;
11320 
11321       // Skip empty types
11322       if (PN.getType()->isEmptyTy())
11323         continue;
11324 
11325       unsigned Reg;
11326       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11327 
11328       if (const auto *C = dyn_cast<Constant>(PHIOp)) {
11329         unsigned &RegOut = ConstantsOut[C];
11330         if (RegOut == 0) {
11331           RegOut = FuncInfo.CreateRegs(C);
11332           // We need to zero/sign extend ConstantInt phi operands to match
11333           // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
11334           ISD::NodeType ExtendType = ISD::ANY_EXTEND;
11335           if (auto *CI = dyn_cast<ConstantInt>(C))
11336             ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
11337                                                     : ISD::ZERO_EXTEND;
11338           CopyValueToVirtualRegister(C, RegOut, ExtendType);
11339         }
11340         Reg = RegOut;
11341       } else {
11342         DenseMap<const Value *, Register>::iterator I =
11343           FuncInfo.ValueMap.find(PHIOp);
11344         if (I != FuncInfo.ValueMap.end())
11345           Reg = I->second;
11346         else {
11347           assert(isa<AllocaInst>(PHIOp) &&
11348                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
11349                  "Didn't codegen value into a register!??");
11350           Reg = FuncInfo.CreateRegs(PHIOp);
11351           CopyValueToVirtualRegister(PHIOp, Reg);
11352         }
11353       }
11354 
11355       // Remember that this register needs to be added to the machine PHI node as
11356       // the input for this MBB.
11357       SmallVector<EVT, 4> ValueVTs;
11358       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
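            // Each value type may be legalized into several machine registers, and
            // there is one Machine PHI node (visited via MBBI) per register; record
            // a (PHI node, virtual register) entry for each of them.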
11359       for (EVT VT : ValueVTs) {
11360         const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
11361         for (unsigned i = 0; i != NumRegisters; ++i)
11362           FuncInfo.PHINodesToUpdate.push_back(
11363               std::make_pair(&*MBBI++, Reg + i));
11364         Reg += NumRegisters;
11365       }
11366     }
11367   }
11368 
11369   ConstantsOut.clear();
11370 }
11371 
11372 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
11373   MachineFunction::iterator I(MBB);
11374   if (++I == FuncInfo.MF->end())
11375     return nullptr;
11376   return &*I;
11377 }
11378 
11379 /// During lowering new call nodes can be created (such as memset, etc.).
11380 /// Those will become new roots of the current DAG, but complications arise
11381 /// when they are tail calls. In such cases, the call lowering will update
11382 /// the root, but the builder still needs to know that a tail call has been
11383 /// lowered in order to avoid generating an additional return.
11384 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
11385   // If the node is null, we do have a tail call.
11386   if (MaybeTC.getNode() != nullptr)
11387     DAG.setRoot(MaybeTC);
11388   else
11389     HasTailCall = true;
11390 }
11391 
11392 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
11393                                         MachineBasicBlock *SwitchMBB,
11394                                         MachineBasicBlock *DefaultMBB) {
11395   MachineFunction *CurMF = FuncInfo.MF;
11396   MachineBasicBlock *NextMBB = nullptr;
11397   MachineFunction::iterator BBI(W.MBB);
11398   if (++BBI != FuncInfo.MF->end())
11399     NextMBB = &*BBI;
11400 
11401   unsigned Size = W.LastCluster - W.FirstCluster + 1;
11402 
11403   BranchProbabilityInfo *BPI = FuncInfo.BPI;
11404 
11405   if (Size == 2 && W.MBB == SwitchMBB) {
11406     // If any two of the cases have the same destination, and if one value
11407     // is the same as the other, but has one bit unset that the other has set,
11408     // use bit manipulation to do two compares at once.  For example:
11409     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
11410     // TODO: This could be extended to merge any 2 cases in switches with 3
11411     // cases.
11412     // TODO: Handle cases where W.CaseBB != SwitchBB.
11413     CaseCluster &Small = *W.FirstCluster;
11414     CaseCluster &Big = *W.LastCluster;
11415 
11416     if (Small.Low == Small.High && Big.Low == Big.High &&
11417         Small.MBB == Big.MBB) {
11418       const APInt &SmallValue = Small.Low->getValue();
11419       const APInt &BigValue = Big.Low->getValue();
11420 
11421       // Check that the two values differ in only one bit.
11422       APInt CommonBit = BigValue ^ SmallValue;
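            // Setting that single differing bit in Cond maps both case values onto
            // BigValue | SmallValue, so one SETEQ against that constant covers both
            // cases.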
11423       if (CommonBit.isPowerOf2()) {
11424         SDValue CondLHS = getValue(Cond);
11425         EVT VT = CondLHS.getValueType();
11426         SDLoc DL = getCurSDLoc();
11427 
11428         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
11429                                  DAG.getConstant(CommonBit, DL, VT));
11430         SDValue Cond = DAG.getSetCC(
11431             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
11432             ISD::SETEQ);
11433 
11434         // Update successor info.
11435         // Both Small and Big will jump to Small.MBB, so we sum up the
11436         // probabilities.
11437         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
11438         if (BPI)
11439           addSuccessorWithProb(
11440               SwitchMBB, DefaultMBB,
11441               // The default destination is the first successor in IR.
11442               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
11443         else
11444           addSuccessorWithProb(SwitchMBB, DefaultMBB);
11445 
11446         // Insert the true branch.
11447         SDValue BrCond =
11448             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
11449                         DAG.getBasicBlock(Small.MBB));
11450         // Insert the false branch.
11451         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
11452                              DAG.getBasicBlock(DefaultMBB));
11453 
11454         DAG.setRoot(BrCond);
11455         return;
11456       }
11457     }
11458   }
11459 
11460   if (TM.getOptLevel() != CodeGenOptLevel::None) {
11461     // Here, we order cases by probability so the most likely case will be
11462     // checked first. However, two clusters can have the same probability in
11463     // which case their relative ordering is non-deterministic. So we use Low
11464     // as a tie-breaker, since clusters are guaranteed to never overlap.
11465     llvm::sort(W.FirstCluster, W.LastCluster + 1,
11466                [](const CaseCluster &a, const CaseCluster &b) {
11467       return a.Prob != b.Prob ?
11468              a.Prob > b.Prob :
11469              a.Low->getValue().slt(b.Low->getValue());
11470     });
11471 
11472     // Rearrange the case blocks so that the last one falls through if possible
11473     // without changing the order of probabilities.
11474     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
11475       --I;
11476       if (I->Prob > W.LastCluster->Prob)
11477         break;
11478       if (I->Kind == CC_Range && I->MBB == NextMBB) {
11479         std::swap(*I, *W.LastCluster);
11480         break;
11481       }
11482     }
11483   }
11484 
11485   // Compute total probability.
11486   BranchProbability DefaultProb = W.DefaultProb;
11487   BranchProbability UnhandledProbs = DefaultProb;
11488   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
11489     UnhandledProbs += I->Prob;
11490 
11491   MachineBasicBlock *CurMBB = W.MBB;
11492   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
11493     bool FallthroughUnreachable = false;
11494     MachineBasicBlock *Fallthrough;
11495     if (I == W.LastCluster) {
11496       // For the last cluster, fall through to the default destination.
11497       Fallthrough = DefaultMBB;
11498       FallthroughUnreachable = isa<UnreachableInst>(
11499           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
11500     } else {
11501       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
11502       CurMF->insert(BBI, Fallthrough);
11503       // Put Cond in a virtual register to make it available from the new blocks.
11504       ExportFromCurrentBlock(Cond);
11505     }
11506     UnhandledProbs -= I->Prob;
11507 
11508     switch (I->Kind) {
11509       case CC_JumpTable: {
11510         // FIXME: Optimize away range check based on pivot comparisons.
11511         JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
11512         SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
11513 
11514         // The jump block hasn't been inserted yet; insert it here.
11515         MachineBasicBlock *JumpMBB = JT->MBB;
11516         CurMF->insert(BBI, JumpMBB);
11517 
11518         auto JumpProb = I->Prob;
11519         auto FallthroughProb = UnhandledProbs;
11520 
11521         // If the default statement is a target of the jump table, we evenly
11522         // distribute the default probability to successors of CurMBB. Also
11523         // update the probability on the edge from JumpMBB to Fallthrough.
11524         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
11525                                               SE = JumpMBB->succ_end();
11526              SI != SE; ++SI) {
11527           if (*SI == DefaultMBB) {
11528             JumpProb += DefaultProb / 2;
11529             FallthroughProb -= DefaultProb / 2;
11530             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
11531             JumpMBB->normalizeSuccProbs();
11532             break;
11533           }
11534         }
11535 
11536         // If the default clause is unreachable, propagate that knowledge into
11537         // JTH->FallthroughUnreachable which will use it to suppress the range
11538         // check.
11539         //
11540         // However, don't do this if we're doing branch target enforcement,
11541         // because a table branch _without_ a range check can be a tempting JOP
11542         // gadget - out-of-bounds inputs that are impossible in correct
11543         // execution become possible again if an attacker can influence the
11544         // control flow. So if an attacker doesn't already have a BTI bypass
11545         // available, we don't want them to be able to get one out of this
11546         // table branch.
11547         if (FallthroughUnreachable) {
11548           Function &CurFunc = CurMF->getFunction();
11549           bool HasBranchTargetEnforcement = false;
11550           if (CurFunc.hasFnAttribute("branch-target-enforcement")) {
11551             HasBranchTargetEnforcement =
11552                 CurFunc.getFnAttribute("branch-target-enforcement")
11553                     .getValueAsBool();
11554           } else {
11555             HasBranchTargetEnforcement =
11556                 CurMF->getMMI().getModule()->getModuleFlag(
11557                     "branch-target-enforcement");
11558           }
11559           if (!HasBranchTargetEnforcement)
11560             JTH->FallthroughUnreachable = true;
11561         }
11562 
11563         if (!JTH->FallthroughUnreachable)
11564           addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
11565         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
11566         CurMBB->normalizeSuccProbs();
11567 
11568         // The jump table header will be inserted in our current block; it will do
11569         // the range check and fall through to our fallthrough block.
11570         JTH->HeaderBB = CurMBB;
11571         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
11572 
11573         // If we're in the right place, emit the jump table header right now.
11574         if (CurMBB == SwitchMBB) {
11575           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
11576           JTH->Emitted = true;
11577         }
11578         break;
11579       }
11580       case CC_BitTests: {
11581         // FIXME: Optimize away range check based on pivot comparisons.
11582         BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
11583 
11584         // The bit test blocks haven't been inserted yet; insert them here.
11585         for (BitTestCase &BTC : BTB->Cases)
11586           CurMF->insert(BBI, BTC.ThisBB);
11587 
11588         // Fill in fields of the BitTestBlock.
11589         BTB->Parent = CurMBB;
11590         BTB->Default = Fallthrough;
11591 
11592         BTB->DefaultProb = UnhandledProbs;
11593         // If the cases in the bit test don't form a contiguous range, we evenly
11594         // distribute the probability on the edge to Fallthrough between the two
11595         // successors of CurMBB.
11596         if (!BTB->ContiguousRange) {
11597           BTB->Prob += DefaultProb / 2;
11598           BTB->DefaultProb -= DefaultProb / 2;
11599         }
11600 
11601         if (FallthroughUnreachable)
11602           BTB->FallthroughUnreachable = true;
11603 
11604         // If we're in the right place, emit the bit test header right now.
11605         if (CurMBB == SwitchMBB) {
11606           visitBitTestHeader(*BTB, SwitchMBB);
11607           BTB->Emitted = true;
11608         }
11609         break;
11610       }
11611       case CC_Range: {
11612         const Value *RHS, *LHS, *MHS;
11613         ISD::CondCode CC;
11614         if (I->Low == I->High) {
11615           // Check Cond == I->Low.
11616           CC = ISD::SETEQ;
11617           LHS = Cond;
11618           RHS = I->Low;
11619           MHS = nullptr;
11620         } else {
11621           // Check I->Low <= Cond <= I->High.
11622           CC = ISD::SETLE;
11623           LHS = I->Low;
11624           MHS = Cond;
11625           RHS = I->High;
11626         }
11627 
11628         // If Fallthrough is unreachable, fold away the comparison.
11629         if (FallthroughUnreachable)
11630           CC = ISD::SETTRUE;
11631 
11632         // The false probability is the sum of all unhandled cases.
11633         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
11634                      getCurSDLoc(), I->Prob, UnhandledProbs);
11635 
11636         if (CurMBB == SwitchMBB)
11637           visitSwitchCase(CB, SwitchMBB);
11638         else
11639           SL->SwitchCases.push_back(CB);
11640 
11641         break;
11642       }
11643     }
11644     CurMBB = Fallthrough;
11645   }
11646 }
11647 
11648 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
11649                                               CaseClusterIt First,
11650                                               CaseClusterIt Last) {
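        // The rank is the number of clusters in [First, Last] that would be checked
        // before CC under the same ordering used when sorting clusters: higher
        // probability first, smaller low case value on ties.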
11651   return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
11652     if (X.Prob != CC.Prob)
11653       return X.Prob > CC.Prob;
11654 
11655     // Ties are broken by comparing the case value.
11656     return X.Low->getValue().slt(CC.Low->getValue());
11657   });
11658 }
11659 
11660 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
11661                                         const SwitchWorkListItem &W,
11662                                         Value *Cond,
11663                                         MachineBasicBlock *SwitchMBB) {
11664   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
11665          "Clusters not sorted?");
11666 
11667   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
11668 
11669   // Balance the tree based on branch probabilities to create a near-optimal (in
11670   // terms of search time given key frequency) binary search tree. See e.g. Kurt
11671   // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
11672   CaseClusterIt LastLeft = W.FirstCluster;
11673   CaseClusterIt FirstRight = W.LastCluster;
11674   auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
11675   auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
11676 
11677   // Move LastLeft and FirstRight towards each other from opposite directions to
11678   // find a partitioning of the clusters which balances the probability on both
11679   // sides. If LeftProb and RightProb are equal, alternate which side is
11680   // taken to ensure 0-probability nodes are distributed evenly.
11681   unsigned I = 0;
11682   while (LastLeft + 1 < FirstRight) {
11683     if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
11684       LeftProb += (++LastLeft)->Prob;
11685     else
11686       RightProb += (--FirstRight)->Prob;
11687     I++;
11688   }
11689 
11690   while (true) {
11691     // Our binary search tree differs from a typical BST in that ours can have up
11692     // to three values in each leaf. The pivot selection above doesn't take that
11693     // into account, which means the tree might require more nodes and be less
11694     // efficient. We compensate for this here.
11695 
11696     unsigned NumLeft = LastLeft - W.FirstCluster + 1;
11697     unsigned NumRight = W.LastCluster - FirstRight + 1;
11698 
11699     if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
11700       // If one side has less than 3 clusters, and the other has more than 3,
11701       // consider taking a cluster from the other side.
11702 
11703       if (NumLeft < NumRight) {
11704         // Consider moving the first cluster on the right to the left side.
11705         CaseCluster &CC = *FirstRight;
11706         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
11707         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
11708         if (LeftSideRank <= RightSideRank) {
11709           // Moving the cluster to the left does not demote it.
11710           ++LastLeft;
11711           ++FirstRight;
11712           continue;
11713         }
11714       } else {
11715         assert(NumRight < NumLeft);
11716         // Consider moving the last element on the left to the right side.
11717         CaseCluster &CC = *LastLeft;
11718         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
11719         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
11720         if (RightSideRank <= LeftSideRank) {
11721           // Moving the cluster to the right does not demote it.
11722           --LastLeft;
11723           --FirstRight;
11724           continue;
11725         }
11726       }
11727     }
11728     break;
11729   }
11730 
11731   assert(LastLeft + 1 == FirstRight);
11732   assert(LastLeft >= W.FirstCluster);
11733   assert(FirstRight <= W.LastCluster);
11734 
11735   // Use the first element on the right as pivot since we will make less-than
11736   // comparisons against it.
11737   CaseClusterIt PivotCluster = FirstRight;
11738   assert(PivotCluster > W.FirstCluster);
11739   assert(PivotCluster <= W.LastCluster);
11740 
11741   CaseClusterIt FirstLeft = W.FirstCluster;
11742   CaseClusterIt LastRight = W.LastCluster;
11743 
11744   const ConstantInt *Pivot = PivotCluster->Low;
11745 
11746   // New blocks will be inserted immediately after the current one.
11747   MachineFunction::iterator BBI(W.MBB);
11748   ++BBI;
11749 
11750   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
11751   // we can branch to its destination directly if it's squeezed exactly in
11752   // between the known lower bound and Pivot - 1.
11753   MachineBasicBlock *LeftMBB;
11754   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
11755       FirstLeft->Low == W.GE &&
11756       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
11757     LeftMBB = FirstLeft->MBB;
11758   } else {
11759     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11760     FuncInfo.MF->insert(BBI, LeftMBB);
11761     WorkList.push_back(
11762         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
11763     // Put Cond in a virtual register to make it available from the new blocks.
11764     ExportFromCurrentBlock(Cond);
11765   }
11766 
11767   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
11768   // single cluster, RHS.Low == Pivot, and we can branch to its destination
11769   // directly if RHS.High equals the current upper bound.
11770   MachineBasicBlock *RightMBB;
11771   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
11772       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
11773     RightMBB = FirstRight->MBB;
11774   } else {
11775     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11776     FuncInfo.MF->insert(BBI, RightMBB);
11777     WorkList.push_back(
11778         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
11779     // Put Cond in a virtual register to make it available from the new blocks.
11780     ExportFromCurrentBlock(Cond);
11781   }
11782 
11783   // Create the CaseBlock record that will be used to lower the branch.
11784   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
11785                getCurSDLoc(), LeftProb, RightProb);
11786 
11787   if (W.MBB == SwitchMBB)
11788     visitSwitchCase(CB, SwitchMBB);
11789   else
11790     SL->SwitchCases.push_back(CB);
11791 }
11792 
11793 // Scale CaseProb after peeling a case with the probability PeeledCaseProb
11794 // from the switch statement.
11795 static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
11796                                             BranchProbability PeeledCaseProb) {
11797   if (PeeledCaseProb == BranchProbability::getOne())
11798     return BranchProbability::getZero();
11799   BranchProbability SwitchProb = PeeledCaseProb.getCompl();
11800 
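        // Effectively compute CaseProb / (1 - PeeledCaseProb): keep the numerator and
        // scale the denominator by the remaining switch probability, clamping the
        // denominator so the result never exceeds one.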
11801   uint32_t Numerator = CaseProb.getNumerator();
11802   uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
11803   return BranchProbability(Numerator, std::max(Numerator, Denominator));
11804 }
11805 
11806 // Try to peel the top probability case if it exceeds the threshold.
11807 // Return the current MachineBasicBlock for the switch statement if the peeling
11808 // does not occur.
11809 // If the peeling is performed, return the newly created MachineBasicBlock
11810 // for the peeled switch statement. Also update Clusters to remove the peeled
11811 // case. PeeledCaseProb is the BranchProbability for the peeled case.
11812 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
11813     const SwitchInst &SI, CaseClusterVector &Clusters,
11814     BranchProbability &PeeledCaseProb) {
11815   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11816   // Don't peel if there is only one cluster or when optimizing for size.
11817   if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
11818       TM.getOptLevel() == CodeGenOptLevel::None ||
11819       SwitchMBB->getParent()->getFunction().hasMinSize())
11820     return SwitchMBB;
11821 
11822   BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
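        // Find the most probable cluster; peeling only happens if some cluster's
        // probability reaches the SwitchPeelThreshold percentage used to seed
        // TopCaseProb above.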
11823   unsigned PeeledCaseIndex = 0;
11824   bool SwitchPeeled = false;
11825   for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
11826     CaseCluster &CC = Clusters[Index];
11827     if (CC.Prob < TopCaseProb)
11828       continue;
11829     TopCaseProb = CC.Prob;
11830     PeeledCaseIndex = Index;
11831     SwitchPeeled = true;
11832   }
11833   if (!SwitchPeeled)
11834     return SwitchMBB;
11835 
11836   LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
11837                     << TopCaseProb << "\n");
11838 
11839   // Record the MBB for the peeled switch statement.
11840   MachineFunction::iterator BBI(SwitchMBB);
11841   ++BBI;
11842   MachineBasicBlock *PeeledSwitchMBB =
11843       FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
11844   FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
11845 
11846   ExportFromCurrentBlock(SI.getCondition());
11847   auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
11848   SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
11849                           nullptr,   nullptr,      TopCaseProb.getCompl()};
11850   lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
11851 
11852   Clusters.erase(PeeledCaseIt);
11853   for (CaseCluster &CC : Clusters) {
11854     LLVM_DEBUG(
11855         dbgs() << "Scale the probability for one cluster, before scaling: "
11856                << CC.Prob << "\n");
11857     CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
11858     LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
11859   }
11860   PeeledCaseProb = TopCaseProb;
11861   return PeeledSwitchMBB;
11862 }
11863 
11864 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
11865   // Extract cases from the switch.
11866   BranchProbabilityInfo *BPI = FuncInfo.BPI;
11867   CaseClusterVector Clusters;
11868   Clusters.reserve(SI.getNumCases());
11869   for (auto I : SI.cases()) {
11870     MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
11871     const ConstantInt *CaseVal = I.getCaseValue();
11872     BranchProbability Prob =
11873         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
11874             : BranchProbability(1, SI.getNumCases() + 1);
11875     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
11876   }
11877 
11878   MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
11879 
11880   // Cluster adjacent cases with the same destination. We do this at all
11881   // optimization levels because it's cheap to do and will make codegen faster
11882   // if there are many clusters.
11883   sortAndRangeify(Clusters);
11884 
11885   // The branch probability of the peeled case.
11886   BranchProbability PeeledCaseProb = BranchProbability::getZero();
11887   MachineBasicBlock *PeeledSwitchMBB =
11888       peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
11889 
11890   // If there is only the default destination, jump there directly.
11891   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11892   if (Clusters.empty()) {
11893     assert(PeeledSwitchMBB == SwitchMBB);
11894     SwitchMBB->addSuccessor(DefaultMBB);
11895     if (DefaultMBB != NextBlock(SwitchMBB)) {
11896       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
11897                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
11898     }
11899     return;
11900   }
11901 
11902   SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
11903                      DAG.getBFI());
11904   SL->findBitTestClusters(Clusters, &SI);
11905 
11906   LLVM_DEBUG({
11907     dbgs() << "Case clusters: ";
11908     for (const CaseCluster &C : Clusters) {
11909       if (C.Kind == CC_JumpTable)
11910         dbgs() << "JT:";
11911       if (C.Kind == CC_BitTests)
11912         dbgs() << "BT:";
11913 
11914       C.Low->getValue().print(dbgs(), true);
11915       if (C.Low != C.High) {
11916         dbgs() << '-';
11917         C.High->getValue().print(dbgs(), true);
11918       }
11919       dbgs() << ' ';
11920     }
11921     dbgs() << '\n';
11922   });
11923 
11924   assert(!Clusters.empty());
11925   SwitchWorkList WorkList;
11926   CaseClusterIt First = Clusters.begin();
11927   CaseClusterIt Last = Clusters.end() - 1;
11928   auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
11929   // Scale the branch probability for DefaultMBB if peeling occurs and
11930   // DefaultMBB is not replaced.
11931   if (PeeledCaseProb != BranchProbability::getZero() &&
11932       DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
11933     DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
11934   WorkList.push_back(
11935       {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
11936 
11937   while (!WorkList.empty()) {
11938     SwitchWorkListItem W = WorkList.pop_back_val();
11939     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
11940 
11941     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
11942         !DefaultMBB->getParent()->getFunction().hasMinSize()) {
11943       // For optimized builds, lower a large range as a balanced binary tree.
11944       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
11945       continue;
11946     }
11947 
11948     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
11949   }
11950 }
11951 
11952 void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
11953   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11954   auto DL = getCurSDLoc();
11955   EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11956   setValue(&I, DAG.getStepVector(DL, ResultVT));
11957 }
11958 
11959 void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
11960   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11961   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11962 
11963   SDLoc DL = getCurSDLoc();
11964   SDValue V = getValue(I.getOperand(0));
11965   assert(VT == V.getValueType() && "Malformed vector.reverse!");
11966 
11967   if (VT.isScalableVector()) {
11968     setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
11969     return;
11970   }
11971 
11972   // Use VECTOR_SHUFFLE for the fixed-length vector
11973   // to maintain existing behavior.
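        // For example, a 4-element vector is reversed with the shuffle mask
        // <3, 2, 1, 0>.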
11974   SmallVector<int, 8> Mask;
11975   unsigned NumElts = VT.getVectorMinNumElements();
11976   for (unsigned i = 0; i != NumElts; ++i)
11977     Mask.push_back(NumElts - 1 - i);
11978 
11979   setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
11980 }
11981 
11982 void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I) {
11983   auto DL = getCurSDLoc();
11984   SDValue InVec = getValue(I.getOperand(0));
11985   EVT OutVT =
11986       InVec.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
11987 
11988   unsigned OutNumElts = OutVT.getVectorMinNumElements();
11989 
11990   // The ISD node needs the input vector split into two equal halves.
11991   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11992                            DAG.getVectorIdxConstant(0, DL));
11993   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11994                            DAG.getVectorIdxConstant(OutNumElts, DL));
11995 
11996   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
11997   // legalisation and combines.
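        // createStrideMask(0, 2, OutNumElts) selects elements 0, 2, 4, ... of the
        // Lo/Hi concatenation and createStrideMask(1, 2, OutNumElts) selects
        // 1, 3, 5, ..., recovering the even and odd interleaved lanes.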
11998   if (OutVT.isFixedLengthVector()) {
11999     SDValue Even = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
12000                                         createStrideMask(0, 2, OutNumElts));
12001     SDValue Odd = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
12002                                        createStrideMask(1, 2, OutNumElts));
12003     SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
12004     setValue(&I, Res);
12005     return;
12006   }
12007 
12008   SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
12009                             DAG.getVTList(OutVT, OutVT), Lo, Hi);
12010   setValue(&I, Res);
12011 }
12012 
12013 void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I) {
12014   auto DL = getCurSDLoc();
12015   EVT InVT = getValue(I.getOperand(0)).getValueType();
12016   SDValue InVec0 = getValue(I.getOperand(0));
12017   SDValue InVec1 = getValue(I.getOperand(1));
12018   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12019   EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12020 
12021   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
12022   // legalisation and combines.
12023   if (OutVT.isFixedLengthVector()) {
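        // createInterleaveMask(NumElts, 2) yields <0, NumElts, 1, NumElts + 1, ...>,
        // alternating elements from the two operands of the concatenation.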
12024     unsigned NumElts = InVT.getVectorMinNumElements();
12025     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVec0, InVec1);
12026     setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
12027                                       createInterleaveMask(NumElts, 2)));
12028     return;
12029   }
12030 
12031   SDValue Res = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
12032                             DAG.getVTList(InVT, InVT), InVec0, InVec1);
12033   Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Res.getValue(0),
12034                     Res.getValue(1));
12035   setValue(&I, Res);
12036 }
12037 
12038 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
12039   SmallVector<EVT, 4> ValueVTs;
12040   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
12041                   ValueVTs);
12042   unsigned NumValues = ValueVTs.size();
12043   if (NumValues == 0) return;
12044 
12045   SmallVector<SDValue, 4> Values(NumValues);
12046   SDValue Op = getValue(I.getOperand(0));
12047 
12048   for (unsigned i = 0; i != NumValues; ++i)
12049     Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
12050                             SDValue(Op.getNode(), Op.getResNo() + i));
12051 
12052   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12053                            DAG.getVTList(ValueVTs), Values));
12054 }
12055 
12056 void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
12057   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12058   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12059 
12060   SDLoc DL = getCurSDLoc();
12061   SDValue V1 = getValue(I.getOperand(0));
12062   SDValue V2 = getValue(I.getOperand(1));
12063   int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
12064 
12065   // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
12066   if (VT.isScalableVector()) {
12067     MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
12068     setValue(&I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
12069                              DAG.getConstant(Imm, DL, IdxVT)));
12070     return;
12071   }
12072 
12073   unsigned NumElts = VT.getVectorNumElements();
12074 
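        // A negative immediate selects the splice point counting back from the end
        // of the first vector; adding NumElts before the modulo normalizes it into
        // [0, NumElts).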
12075   uint64_t Idx = (NumElts + Imm) % NumElts;
12076 
12077   // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
12078   SmallVector<int, 8> Mask;
12079   for (unsigned i = 0; i < NumElts; ++i)
12080     Mask.push_back(Idx + i);
12081   setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
12082 }
12083 
12084 // Consider the following MIR after SelectionDAG, which produces output in
12085 // phyregs in the first case or virtregs in the second case.
12086 // physregs in the first case or virtregs in the second case.
12087 // INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
12088 // %5:gr32 = COPY $ebx
12089 // %6:gr32 = COPY $edx
12090 // %1:gr32 = COPY %6:gr32
12091 // %0:gr32 = COPY %5:gr32
12092 //
12093 // INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
12094 // %1:gr32 = COPY %6:gr32
12095 // %0:gr32 = COPY %5:gr32
12096 //
12097 // Given %0, we'd like to return $ebx in the first case and %5 in the second.
12098 // Given %1, we'd like to return $edx in the first case and %6 in the second.
12099 //
12100 // If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
12101 // to a single virtreg (such as %0). The remaining outputs monotonically
12102 // increase in virtreg number from there. If a callbr has no outputs, then it
12103 // should not have a corresponding callbr landingpad; in fact, the callbr
12104 // landingpad would not even be able to refer to such a callbr.
12105 static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) {
12106   MachineInstr *MI = MRI.def_begin(Reg)->getParent();
12107   // There is definitely at least one copy.
12108   assert(MI->getOpcode() == TargetOpcode::COPY &&
12109          "start of copy chain MUST be COPY");
12110   Reg = MI->getOperand(1).getReg();
12111   MI = MRI.def_begin(Reg)->getParent();
12112   // There may be an optional second copy.
12113   if (MI->getOpcode() == TargetOpcode::COPY) {
12114     assert(Reg.isVirtual() && "expected COPY of virtual register");
12115     Reg = MI->getOperand(1).getReg();
12116     assert(Reg.isPhysical() && "expected COPY of physical register");
12117     MI = MRI.def_begin(Reg)->getParent();
12118   }
12119   // Following the copies back, the chain must end at an INLINEASM_BR.
12120   assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12121          "end of copy chain MUST be INLINEASM_BR");
12122   return Reg;
12123 }
12124 
12125 // We must do this walk rather than the simpler
12126 //   setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
12127 // otherwise we will end up with copies of virtregs only valid along direct
12128 // edges.
12129 void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
12130   SmallVector<EVT, 8> ResultVTs;
12131   SmallVector<SDValue, 8> ResultValues;
12132   const auto *CBR =
12133       cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());
12134 
12135   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12136   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
12137   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
12138 
12139   unsigned InitialDef = FuncInfo.ValueMap[CBR];
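        // The callbr's outputs occupy consecutive virtual registers starting at
        // InitialDef (see FollowCopyChain above); InitialDef is advanced as each
        // output constraint is processed below.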
12140   SDValue Chain = DAG.getRoot();
12141 
12142   // Re-parse the asm constraints string.
12143   TargetLowering::AsmOperandInfoVector TargetConstraints =
12144       TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
12145   for (auto &T : TargetConstraints) {
12146     SDISelAsmOperandInfo OpInfo(T);
12147     if (OpInfo.Type != InlineAsm::isOutput)
12148       continue;
12149 
12150     // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the
12151     // individual constraint.
12152     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
12153 
12154     switch (OpInfo.ConstraintType) {
12155     case TargetLowering::C_Register:
12156     case TargetLowering::C_RegisterClass: {
12157       // Fill in OpInfo.AssignedRegs.Regs.
12158       getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);
12159 
12160       // getRegistersForValue may produce one or more registers depending on
12161       // whether the OpInfo.ConstraintVT is legal on the target or not.
12162       for (size_t i = 0, e = OpInfo.AssignedRegs.Regs.size(); i != e; ++i) {
12163         Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
12164         if (Register::isPhysicalRegister(OriginalDef))
12165           FuncInfo.MBB->addLiveIn(OriginalDef);
12166         // Update the assigned registers to use the original defs.
12167         OpInfo.AssignedRegs.Regs[i] = OriginalDef;
12168       }
12169 
12170       SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12171           DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
12172       ResultValues.push_back(V);
12173       ResultVTs.push_back(OpInfo.ConstraintVT);
12174       break;
12175     }
12176     case TargetLowering::C_Other: {
12177       SDValue Flag;
12178       SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
12179                                                   OpInfo, DAG);
12180       ++InitialDef;
12181       ResultValues.push_back(V);
12182       ResultVTs.push_back(OpInfo.ConstraintVT);
12183       break;
12184     }
12185     default:
12186       break;
12187     }
12188   }
12189   SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12190                           DAG.getVTList(ResultVTs), ResultValues);
12191   setValue(&I, V);
12192 }
12193