//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstddef>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>

using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<bool>
    InsertAssertAlign("insert-assert-align", cl::init(true),
                      cl::desc("Insert the experimental `assertalign` node."),
                      cl::ReallyHidden);

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will void this "
             "optimization"));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      std::optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
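/// As an illustrative example (an assumption about a typical 32-bit target,
/// not an exhaustive description): an i64 value passed in two i32 registers
/// arrives here as two i32 parts and is reassembled with a BUILD_PAIR of the
/// low and high halves.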
static SDValue
getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
                 unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
                 std::optional<CallingConv::ID> CC = std::nullopt,
                 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
  // Let the target assemble the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
                                                   PartVT, ValueVT, CC))
    return Val;

  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  CC);

  assert(NumParts > 0 && "No parts to assemble!");
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = llvm::bit_floor(NumParts);
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
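        // Illustrative example: with three i32 parts forming an i96,
        // RoundParts is 2, so the first two parts form an i64 and the single
        // odd part is extended, shifted into the high bits, and OR'd in.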
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                         TLI.getShiftAmountTy(
                                             TotalVT, DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp)
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm())
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
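/// As an illustrative example (assuming a target whose breakdown yields two
/// <2 x i32> registers for a <4 x i32> value): the two intermediate vectors
/// are reassembled here with a single CONCAT_VECTORS node.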
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      std::optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.has_value();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V, CallConv);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V, CallConv);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        IntermediateVT.isVector()
            ? EVT::getVectorVT(
                  *DAG.getContext(), IntermediateVT.getScalarType(),
                  IntermediateVT.getVectorElementCount() * NumParts)
            : EVT::getVectorVT(*DAG.getContext(),
                               IntermediateVT.getScalarType(),
                               NumIntermediates);
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    // If the parts vector has more elements than the value vector, then we
    // have a vector widening case (e.g. <2 x float> -> <4 x float>).
    // Extract the elements we want.
    if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
      assert((PartEVT.getVectorElementCount().getKnownMinValue() >
              ValueVT.getVectorElementCount().getKnownMinValue()) &&
             (PartEVT.getVectorElementCount().isScalable() ==
              ValueVT.getVectorElementCount().isScalable()) &&
             "Cannot narrow, it would be a lossy transformation");
      PartEVT =
          EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
                           ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
                        DAG.getVectorIdxConstant(0, DL));
      if (PartEVT == ValueVT)
        return Val;
      if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

      // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    }

    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
     // Certain ABIs require that vectors are passed as integers. If the
     // vectors are the same size, this is an obvious bitcast.
     if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
       return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
     } else if (ValueVT.bitsLT(PartEVT)) {
       const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
       EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
       // Drop the extra bits.
       Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
       return DAG.getBitcast(ValueVT, Val);
     }

     diagnosePossiblyInvalidConstraint(
         *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
     return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
    unsigned ValueSize = ValueSVT.getSizeInBits();
    if (ValueSize == PartEVT.getSizeInBits()) {
      Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
    } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
      // It's possible a scalar floating point type gets softened to integer and
      // then promoted to a larger integer. If PartEVT is the larger integer
      // we need to truncate it and then bitcast to the FP type.
      assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      Val = DAG.getBitcast(ValueSVT, Val);
    } else {
      Val = ValueVT.isFloatingPoint()
                ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
    }
  }

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
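/// As an illustrative example (again assuming a 32-bit target): an i64 value
/// going out in two i32 parts is bitcast and then repeatedly bisected with
/// EXTRACT_ELEMENT to produce the low and high halves.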
static void
getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
               unsigned NumParts, MVT PartVT, const Value *V,
               std::optional<CallingConv::ID> CallConv = std::nullopt,
               ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  // Let the target split the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
                                      CallConv))
    return;
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  unsigned PartBits = PartVT.getSizeInBits();
  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = llvm::bit_floor(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
      DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
                                     const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  EVT PartEVT = PartVT.getVectorElementType();
  EVT ValueEVT = ValueVT.getVectorElementType();
  ElementCount PartNumElts = PartVT.getVectorElementCount();
  ElementCount ValueNumElts = ValueVT.getVectorElementCount();

  // We only support widening vectors with equivalent element types and
  // fixed/scalable properties. If a target needs to widen a fixed-length type
  // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
  if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
      PartNumElts.isScalable() != ValueNumElts.isScalable())
    return SDValue();

  // Special-case bf16, because some targets share its ABI with fp16.
  if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
    assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
           "Cannot widen to illegal type");
    Val = DAG.getNode(ISD::BITCAST, DL,
                      ValueVT.changeVectorElementType(MVT::f16), Val);
  } else if (PartEVT != ValueEVT) {
    return SDValue();
  }

  // Widening a scalable vector to another scalable vector is done by inserting
  // the vector into a larger undef one.
  if (PartNumElts.isScalable())
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                       Val, DAG.getVectorIdxConstant(0, DL));

  // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
  // undef elements.
  SmallVector<SDValue, 16> Ops;
  DAG.ExtractVectorElements(Val, Ops);
  SDValue EltUndef = DAG.getUNDEF(PartEVT);
  Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);

  // FIXME: Use CONCAT for 2x -> 4x.
  return DAG.getBuildVector(PartVT, DL, Ops);
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
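/// As an illustrative example (assuming a breakdown of <4 x i32> into two
/// <2 x i32> registers): the value is split here with two EXTRACT_SUBVECTOR
/// nodes, at element indices 0 and 2.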
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.has_value();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorElementCount() ==
                   ValueVT.getVectorElementCount()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else if (PartEVT.isVector() &&
               PartEVT.getVectorElementType() !=
                   ValueVT.getVectorElementType() &&
               TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
                   TargetLowering::TypeWidenVector) {
      // Combination of widening and promotion.
      EVT WidenVT =
          EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
                           PartVT.getVectorElementCount());
      SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
      Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
    } else {
      // Don't extract an integer from a float vector. This can happen if the
      // FP type gets softened to integer and then promoted. The promotion
      // prevents it from being picked up by the earlier bitcast case.
      if (ValueVT.getVectorElementCount().isScalar() &&
          (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
                          DAG.getVectorIdxConstant(0, DL));
      } else {
        uint64_t ValueSize = ValueVT.getFixedSizeInBits();
        assert(PartVT.getFixedSizeInBits() > ValueSize &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
        RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
         "Mixing scalable and fixed vectors when copying in parts");

  std::optional<ElementCount> DestEltCnt;

  if (IntermediateVT.isVector())
    DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
  else
    DestEltCnt = ElementCount::getFixed(NumIntermediates);

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);

  if (ValueVT == BuiltVectorTy) {
    // Nothing to do.
  } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
    // Bitconvert vector->vector case.
    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  } else {
    if (BuiltVectorTy.getVectorElementType().bitsGT(
            ValueVT.getVectorElementType())) {
      // Integer promotion.
      ValueVT = EVT::getVectorVT(*DAG.getContext(),
                                 BuiltVectorTy.getVectorElementType(),
                                 ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
    }

    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
      Val = Widened;
    }
  }

  assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      // This does something sensible for scalable vectors - see the
      // definition of EXTRACT_SUBVECTOR for further details.
      unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
    } else {
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
                           DAG.getVectorIdxConstant(i, DL));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each intermediate value
    // into legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, std::optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

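// As a rough illustration (assuming a typical 32-bit target): for an i64
// value, the constructor below records a single ValueVT of i64, a RegVT of
// i32, a RegCount of 2, and two consecutive registers starting at Reg.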
RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           std::optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Glue, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Glue) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
        *Glue = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Glue,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Glue) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
      *Glue = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Glue)
    // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  InlineAsm::Flag Flag(Code, Regs.size());
  if (HasMatching)
    Flag.setMatchingOp(MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag.setRegClass(RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind::Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    Register SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT RegisterVT = RegVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
                                           RegisterVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<unsigned, TypeSize>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, TypeSize>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    TypeSize RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               AssumptionCache *ac,
                               const TargetLibraryInfo *li) {
  AA = aa;
  AC = ac;
  GFI = gfi;
  LibInfo = li;
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
  AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
      *DAG.getMachineFunction().getFunction().getParent());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
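// For example, if two pending loads L1 and L2 are outstanding and the current
// root R is neither the entry token nor already an operand of either load,
// the new root becomes TokenFactor(L1, L2, R).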
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add current root to PendingChains, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}

SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}

void SelectionDAGBuilder::handleDebugDeclare(Value *Address,
                                             DILocalVariable *Variable,
                                             DIExpression *Expression,
                                             DebugLoc DL) {
  assert(Variable && "Missing variable");

  // Check if address has undef value.
  if (!Address || isa<UndefValue>(Address) ||
      (Address->use_empty() && !isa<Argument>(Address))) {
    LLVM_DEBUG(
        dbgs()
        << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
    return;
  }

  bool IsParameter = Variable->isParameter() || isa<Argument>(Address);

  SDValue &N = NodeMap[Address];
  if (!N.getNode() && isa<Argument>(Address))
    // Check unused arguments map.
    N = UnusedArgNodeMap[Address];
  SDDbgValue *SDV;
  if (N.getNode()) {
    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
      Address = BCI->getOperand(0);
    // Parameters are handled specially.
    auto *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
    if (IsParameter && FINode) {
      // Byval parameter. We have a frame index at this point.
      SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
                                      /*IsIndirect*/ true, DL, SDNodeOrder);
    } else if (isa<Argument>(Address)) {
      // Address is an argument, so try to emit its dbg value using
      // virtual register info from the FuncInfo.ValueMap.
      EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                               FuncArgumentDbgValueKind::Declare, N);
      return;
    } else {
      SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
                            true, DL, SDNodeOrder);
    }
    DAG.AddDbgValue(SDV, IsParameter);
  } else {
    // If Address is an argument then try to emit its dbg value using
    // virtual register info from the FuncInfo.ValueMap.
    if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                                  FuncArgumentDbgValueKind::Declare, N)) {
      LLVM_DEBUG(dbgs() << "dbg_declare: Dropping debug info"
                        << " (could not emit func-arg dbg_value)\n");
    }
  }
  return;
}

void SelectionDAGBuilder::visitDbgInfo(const Instruction &I) {
  // Add SDDbgValue nodes for any var locs here. Do so before updating
  // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
  if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
    for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
         It != End; ++It) {
      auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
      dropDanglingDebugInfo(Var, It->Expr);
      if (It->Values.isKillLocation(It->Expr)) {
        handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
        continue;
      }
      SmallVector<Value *> Values(It->Values.location_ops());
      if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
                            It->Values.hasArgList())) {
        SmallVector<Value *, 4> Vals;
        for (Value *V : It->Values.location_ops())
          Vals.push_back(V);
        addDanglingDebugInfo(Vals,
                             FnVarLocs->getDILocalVariable(It->VariableID),
                             It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
      }
    }
  }

  // Handle any debug-info attached to this instruction in the form of
  // DPValue non-instruction debug-info records.
1234   for (DPValue &DPV : I.getDbgValueRange()) {
1235     DILocalVariable *Variable = DPV.getVariable();
1236     DIExpression *Expression = DPV.getExpression();
1237     dropDanglingDebugInfo(Variable, Expression);
1238 
1239     if (DPV.getType() == DPValue::LocationType::Declare) {
1240       if (FuncInfo.PreprocessedDPVDeclares.contains(&DPV))
1241         continue;
1242       LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DPV
1243                         << "\n");
1244       handleDebugDeclare(DPV.getVariableLocationOp(0), Variable, Expression,
1245                          DPV.getDebugLoc());
1246       continue;
1247     }
1248 
1249     // A DPValue with no locations is a kill location.
1250     SmallVector<Value *, 4> Values(DPV.location_ops());
1251     if (Values.empty()) {
1252       handleKillDebugValue(Variable, Expression, DPV.getDebugLoc(),
1253                            SDNodeOrder);
1254       continue;
1255     }
1256 
1257     // A DPValue with an undef or absent location is also a kill location.
1258     if (llvm::any_of(Values,
1259                      [](Value *V) { return !V || isa<UndefValue>(V); })) {
1260       handleKillDebugValue(Variable, Expression, DPV.getDebugLoc(),
1261                            SDNodeOrder);
1262       continue;
1263     }
1264 
1265     bool IsVariadic = DPV.hasArgList();
1266     if (!handleDebugValue(Values, Variable, Expression, DPV.getDebugLoc(),
1267                           SDNodeOrder, IsVariadic)) {
1268       addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
1269                            DPV.getDebugLoc(), SDNodeOrder);
1270     }
1271   }
1272 }
1273 
1274 void SelectionDAGBuilder::visit(const Instruction &I) {
1275   visitDbgInfo(I);
1276 
1277   // Set up outgoing PHI node register values before emitting the terminator.
1278   if (I.isTerminator()) {
1279     HandlePHINodesInSuccessorBlocks(I.getParent());
1280   }
1281 
1282   // Increase the SDNodeOrder if dealing with a non-debug instruction.
1283   if (!isa<DbgInfoIntrinsic>(I))
1284     ++SDNodeOrder;
1285 
1286   CurInst = &I;
1287 
1288   // Set inserted listener only if required.
1289   bool NodeInserted = false;
1290   std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1291   MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
1292   if (PCSectionsMD) {
1293     InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1294         DAG, [&](SDNode *) { NodeInserted = true; });
1295   }
1296 
1297   visit(I.getOpcode(), I);
1298 
1299   if (!I.isTerminator() && !HasTailCall &&
1300       !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
1301     CopyToExportRegsIfNeeded(&I);
1302 
1303   // Handle metadata.
1304   if (PCSectionsMD) {
1305     auto It = NodeMap.find(&I);
1306     if (It != NodeMap.end()) {
1307       DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1308     } else if (NodeInserted) {
1309       // This should not happen; if it does, don't let it go unnoticed so we
1310       // can fix it. The relevant visit*() function probably lacks a setValue().
1311       errs() << "warning: losing !pcsections metadata ["
1312              << I.getModule()->getName() << "]\n";
1313       LLVM_DEBUG(I.dump());
1314       assert(false);
1315     }
1316   }
1317 
1318   CurInst = nullptr;
1319 }
1320 
1321 void SelectionDAGBuilder::visitPHI(const PHINode &) {
1322   llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1323 }
1324 
1325 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1326   // Note: this doesn't use InstVisitor, because it has to work with
1327   // ConstantExpr's in addition to instructions.
1328   switch (Opcode) {
1329   default: llvm_unreachable("Unknown instruction type encountered!");
1330     // Build the switch statement using the Instruction.def file.
1331 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1332     case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1333 #include "llvm/IR/Instruction.def"
1334   }
1335 }
1336 
1337 static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
1338                                             DILocalVariable *Variable,
1339                                             DebugLoc DL, unsigned Order,
1340                                             SmallVectorImpl<Value *> &Values,
1341                                             DIExpression *Expression) {
1342   // For variadic dbg_values we will now insert an undef.
1343   // FIXME: We can potentially recover these!
1344   SmallVector<SDDbgOperand, 2> Locs;
1345   for (const Value *V : Values) {
1346     auto *Undef = UndefValue::get(V->getType());
1347     Locs.push_back(SDDbgOperand::fromConst(Undef));
1348   }
1349   SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
1350                                         /*IsIndirect=*/false, DL, Order,
1351                                         /*IsVariadic=*/true);
1352   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1353   return true;
1354 }
1355 
1356 void SelectionDAGBuilder::addDanglingDebugInfo(SmallVectorImpl<Value *> &Values,
1357                                                DILocalVariable *Var,
1358                                                DIExpression *Expr,
1359                                                bool IsVariadic, DebugLoc DL,
1360                                                unsigned Order) {
1361   if (IsVariadic) {
1362     handleDanglingVariadicDebugInfo(DAG, Var, DL, Order, Values, Expr);
1363     return;
1364   }
1365   // TODO: Dangling debug info will eventually either be resolved or produce
1366   // an Undef DBG_VALUE. However in the resolution case, a gap may appear
1367   // between the original dbg.value location and its resolved DBG_VALUE,
1368   // which we should ideally fill with an extra Undef DBG_VALUE.
1369   assert(Values.size() == 1);
1370   DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);
1371 }
1372 
1373 void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1374                                                 const DIExpression *Expr) {
1375   auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1376     DIVariable *DanglingVariable = DDI.getVariable();
1377     DIExpression *DanglingExpr = DDI.getExpression();
1378     if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1379       LLVM_DEBUG(dbgs() << "Dropping dangling debug info for "
1380                         << printDDI(nullptr, DDI) << "\n");
1381       return true;
1382     }
1383     return false;
1384   };
1385 
1386   for (auto &DDIMI : DanglingDebugInfoMap) {
1387     DanglingDebugInfoVector &DDIV = DDIMI.second;
1388 
1389     // If debug info is to be dropped, run it through final checks to see
1390     // whether it can be salvaged.
1391     for (auto &DDI : DDIV)
1392       if (isMatchingDbgValue(DDI))
1393         salvageUnresolvedDbgValue(DDIMI.first, DDI);
1394 
1395     erase_if(DDIV, isMatchingDbgValue);
1396   }
1397 }
1398 
1399 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1400 // generate the debug data structures now that we've seen its definition.
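     // For example (illustrative IR, not from this file): the dbg.value below
     // refers to %x before any SDNode exists for it, so it is recorded as
     // dangling and resolved here once the add has been visited:
     //   call void @llvm.dbg.value(metadata i32 %x, metadata !var, metadata !DIExpression())
     //   %x = add i32 %a, %b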
1401 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1402                                                    SDValue Val) {
1403   auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1404   if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1405     return;
1406 
1407   DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1408   for (auto &DDI : DDIV) {
1409     DebugLoc DL = DDI.getDebugLoc();
1410     unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1411     unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1412     DILocalVariable *Variable = DDI.getVariable();
1413     DIExpression *Expr = DDI.getExpression();
1414     assert(Variable->isValidLocationForIntrinsic(DL) &&
1415            "Expected inlined-at fields to agree");
1416     SDDbgValue *SDV;
1417     if (Val.getNode()) {
1418       // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1419       // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1420       // we couldn't resolve it directly when examining the DbgValue intrinsic
1421       // in the first place we should not be more successful here). Unless we
1422       // have some test case that prove this to be correct we should avoid
1423       // calling EmitFuncArgumentDbgValue here.
1424       if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
1425                                     FuncArgumentDbgValueKind::Value, Val)) {
1426         LLVM_DEBUG(dbgs() << "Resolve dangling debug info for "
1427                           << printDDI(V, DDI) << "\n");
1428         LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
1429         // Increase the SDNodeOrder for the DbgValue here to make sure it is
1430         // inserted after the definition of Val when emitting the instructions
1431         // after ISel. An alternative could be to teach
1432         // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1433         LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1434                    << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1435                    << ValSDNodeOrder << "\n");
1436         SDV = getDbgValue(Val, Variable, Expr, DL,
1437                           std::max(DbgSDNodeOrder, ValSDNodeOrder));
1438         DAG.AddDbgValue(SDV, false);
1439       } else
1440         LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
1441                           << printDDI(V, DDI)
1442                           << " in EmitFuncArgumentDbgValue\n");
1443     } else {
1444       LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)
1445                         << "\n");
1446       auto Undef = UndefValue::get(V->getType());
1447       auto SDV =
1448           DAG.getConstantDbgValue(Variable, Expr, Undef, DL, DbgSDNodeOrder);
1449       DAG.AddDbgValue(SDV, false);
1450     }
1451   }
1452   DDIV.clear();
1453 }
1454 
1455 void SelectionDAGBuilder::salvageUnresolvedDbgValue(const Value *V,
1456                                                     DanglingDebugInfo &DDI) {
1457   // TODO: For the variadic implementation, instead of only checking the fail
1458   // state of `handleDebugValue`, we need to know specifically which values were
1459   // invalid, so that we attempt to salvage only those values when processing
1460   // a DIArgList.
1461   const Value *OrigV = V;
1462   DILocalVariable *Var = DDI.getVariable();
1463   DIExpression *Expr = DDI.getExpression();
1464   DebugLoc DL = DDI.getDebugLoc();
1465   unsigned SDOrder = DDI.getSDNodeOrder();
1466 
1467   // Currently we consider only dbg.value intrinsics -- we tell the salvager
1468   // that DW_OP_stack_value is desired.
1469   bool StackValue = true;
1470 
1471   // Can this Value be encoded without any further work?
1472   if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
1473     return;
1474 
1475   // Attempt to salvage back through as many instructions as possible. Bail if
1476   // a non-instruction is seen, such as a constant expression or global
1477   // variable. FIXME: Further work could recover those too.
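       // Rough illustration (assumed operands, not a real test case): if V is
       //   %add = add i64 %a, 16
       // and %add itself cannot be encoded, salvaging can rewrite the location
       // to use %a with the expression extended by roughly
       // "DW_OP_plus_uconst 16, DW_OP_stack_value".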
1478   while (isa<Instruction>(V)) {
1479     const Instruction &VAsInst = *cast<const Instruction>(V);
1480     // Temporary "0", awaiting real implementation.
1481     SmallVector<uint64_t, 16> Ops;
1482     SmallVector<Value *, 4> AdditionalValues;
1483     V = salvageDebugInfoImpl(const_cast<Instruction &>(VAsInst),
1484                              Expr->getNumLocationOperands(), Ops,
1485                              AdditionalValues);
1486     // If we cannot salvage any further, and haven't yet found a suitable debug
1487     // expression, bail out.
1488     if (!V)
1489       break;
1490 
1491     // TODO: If AdditionalValues isn't empty, then the salvage can only be
1492     // represented with a DBG_VALUE_LIST, so we give up. When we have support
1493     // here for variadic dbg_values, remove that condition.
1494     if (!AdditionalValues.empty())
1495       break;
1496 
1497     // New value and expr now represent this debuginfo.
1498     Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1499 
1500     // Some kind of simplification occurred: check whether the operand of the
1501     // salvaged debug expression can be encoded in this DAG.
1502     if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
1503       LLVM_DEBUG(
1504           dbgs() << "Salvaged debug location info for:\n  " << *Var << "\n"
1505                  << *OrigV << "\nBy stripping back to:\n  " << *V << "\n");
1506       return;
1507     }
1508   }
1509 
1510   // This was the final opportunity to salvage this debug information, and it
1511   // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1512   // any earlier variable location.
1513   assert(OrigV && "V shouldn't be null");
1514   auto *Undef = UndefValue::get(OrigV->getType());
1515   auto *SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1516   DAG.AddDbgValue(SDV, false);
1517   LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  "
1518                     << printDDI(OrigV, DDI) << "\n");
1519 }
1520 
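     // Illustrative note: a dbg.value whose operand is undef (or a DPValue with
     // no location operands) carries no machine location. handleKillDebugValue
     // emits a poison-valued DBG_VALUE with an undef expression so that any
     // earlier location range for the variable is terminated, not extended.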
1521 void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var,
1522                                                DIExpression *Expr,
1523                                                DebugLoc DbgLoc,
1524                                                unsigned Order) {
1525   Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context));
1526   DIExpression *NewExpr =
1527       const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr));
1528   handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
1529                    /*IsVariadic*/ false);
1530 }
1531 
1532 bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
1533                                            DILocalVariable *Var,
1534                                            DIExpression *Expr, DebugLoc DbgLoc,
1535                                            unsigned Order, bool IsVariadic) {
1536   if (Values.empty())
1537     return true;
1538   SmallVector<SDDbgOperand> LocationOps;
1539   SmallVector<SDNode *> Dependencies;
1540   for (const Value *V : Values) {
1541     // Constant value.
1542     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1543         isa<ConstantPointerNull>(V)) {
1544       LocationOps.emplace_back(SDDbgOperand::fromConst(V));
1545       continue;
1546     }
1547 
1548     // Look through IntToPtr constants.
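         // (Illustrative: a constant such as "inttoptr (i64 4096 to ptr)" is
         // described by its integer operand, 4096.)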
1549     if (auto *CE = dyn_cast<ConstantExpr>(V))
1550       if (CE->getOpcode() == Instruction::IntToPtr) {
1551         LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
1552         continue;
1553       }
1554 
1555     // If the Value is a frame index, we can create a FrameIndex debug value
1556     // without relying on the DAG at all.
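         // (Illustrative: "%buf = alloca [16 x i8]" in the entry block is a
         // static alloca with a frame index, so no SDNode is needed at all.)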
1557     if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1558       auto SI = FuncInfo.StaticAllocaMap.find(AI);
1559       if (SI != FuncInfo.StaticAllocaMap.end()) {
1560         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1561         continue;
1562       }
1563     }
1564 
1565     // Do not use getValue() in here; we don't want to generate code at
1566     // this point if it hasn't been done yet.
1567     SDValue N = NodeMap[V];
1568     if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1569       N = UnusedArgNodeMap[V];
1570     if (N.getNode()) {
1571       // Only emit func arg dbg value for non-variadic dbg.values for now.
1572       if (!IsVariadic &&
1573           EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1574                                    FuncArgumentDbgValueKind::Value, N))
1575         return true;
1576       if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
1577         // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
1578         // describe stack slot locations.
1579         //
1580         // Consider "int x = 0; int *px = &x;". There are two kinds of
1581         // interesting debug values here after optimization:
1582         //
1583         //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
1584         //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1585         //
1586         // Both describe the direct values of their associated variables.
1587         Dependencies.push_back(N.getNode());
1588         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1589         continue;
1590       }
1591       LocationOps.emplace_back(
1592           SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1593       continue;
1594     }
1595 
1596     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1597     // Special rules apply for the first dbg.values of parameter variables in a
1598     // function: they reference Argument Values, the variable is a parameter,
1599     // and the parameter belongs to the current function (there is no
1600     // inlined-at location). We need to let them dangle until they get an SDNode.
1601     bool IsParamOfFunc =
1602         isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1603     if (IsParamOfFunc)
1604       return false;
1605 
1606     // The value is not used in this block yet (or it would have an SDNode).
1607     // We still want the value to appear for the user if possible -- if it has
1608     // an associated VReg, we can refer to that instead.
1609     auto VMI = FuncInfo.ValueMap.find(V);
1610     if (VMI != FuncInfo.ValueMap.end()) {
1611       unsigned Reg = VMI->second;
1612       // If this is a PHI node, it may be split up into several MI PHI nodes
1613       // (in FunctionLoweringInfo::set).
1614       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1615                        V->getType(), std::nullopt);
1616       if (RFV.occupiesMultipleRegs()) {
1617         // FIXME: We could potentially support variadic dbg_values here.
1618         if (IsVariadic)
1619           return false;
1620         unsigned Offset = 0;
1621         unsigned BitsToDescribe = 0;
1622         if (auto VarSize = Var->getSizeInBits())
1623           BitsToDescribe = *VarSize;
1624         if (auto Fragment = Expr->getFragmentInfo())
1625           BitsToDescribe = Fragment->SizeInBits;
1626         for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
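             // Rough example (assumed sizes): a 64-bit variable spread over two
             // 32-bit registers is emitted as two DBG_VALUEs whose expressions
             // end in DW_OP_LLVM_fragment 0 32 and DW_OP_LLVM_fragment 32 32.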
1627           // Bail out if all bits are described already.
1628           if (Offset >= BitsToDescribe)
1629             break;
1630           // TODO: handle scalable vectors.
1631           unsigned RegisterSize = RegAndSize.second;
1632           unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1633                                       ? BitsToDescribe - Offset
1634                                       : RegisterSize;
1635           auto FragmentExpr = DIExpression::createFragmentExpression(
1636               Expr, Offset, FragmentSize);
1637           if (!FragmentExpr)
1638             continue;
1639           SDDbgValue *SDV = DAG.getVRegDbgValue(
1640               Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, SDNodeOrder);
1641           DAG.AddDbgValue(SDV, false);
1642           Offset += RegisterSize;
1643         }
1644         return true;
1645       }
1646       // We can use simple vreg locations for variadic dbg_values as well.
1647       LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1648       continue;
1649     }
1650     // We failed to create a SDDbgOperand for V.
1651     return false;
1652   }
1653 
1654   // We have created a SDDbgOperand for each Value in Values.
1655   // Should use Order instead of SDNodeOrder?
1656   assert(!LocationOps.empty());
1657   SDDbgValue *SDV = DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1658                                         /*IsIndirect=*/false, DbgLoc,
1659                                         SDNodeOrder, IsVariadic);
1660   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1661   return true;
1662 }
1663 
1664 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1665   // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1666   for (auto &Pair : DanglingDebugInfoMap)
1667     for (auto &DDI : Pair.second)
1668       salvageUnresolvedDbgValue(const_cast<Value *>(Pair.first), DDI);
1669   clearDanglingDebugInfo();
1670 }
1671 
1672 /// getCopyFromRegs - If there was virtual register allocated for the value V
1673 /// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
1674 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1675   DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1676   SDValue Result;
1677 
1678   if (It != FuncInfo.ValueMap.end()) {
1679     Register InReg = It->second;
1680 
1681     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1682                      DAG.getDataLayout(), InReg, Ty,
1683                      std::nullopt); // This is not an ABI copy.
1684     SDValue Chain = DAG.getEntryNode();
1685     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1686                                  V);
1687     resolveDanglingDebugInfo(V, Result);
1688   }
1689 
1690   return Result;
1691 }
1692 
1693 /// getValue - Return an SDValue for the given Value.
1694 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1695   // If we already have an SDValue for this value, use it. It's important
1696   // to do this first, so that we don't create a CopyFromReg if we already
1697   // have a regular SDValue.
1698   SDValue &N = NodeMap[V];
1699   if (N.getNode()) return N;
1700 
1701   // If there's a virtual register allocated and initialized for this
1702   // value, use it.
1703   if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1704     return copyFromReg;
1705 
1706   // Otherwise create a new SDValue and remember it.
1707   SDValue Val = getValueImpl(V);
1708   NodeMap[V] = Val;
1709   resolveDanglingDebugInfo(V, Val);
1710   return Val;
1711 }
1712 
1713 /// getNonRegisterValue - Return an SDValue for the given Value, but
1714 /// don't look in FuncInfo.ValueMap for a virtual register.
1715 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1716   // If we already have an SDValue for this value, use it.
1717   SDValue &N = NodeMap[V];
1718   if (N.getNode()) {
1719     if (isIntOrFPConstant(N)) {
1720       // Remove the debug location from the node as the node is about to be used
1721       // in a location which may differ from the original debug location.  This
1722       // is relevant to Constant and ConstantFP nodes because they can appear
1723       // as constant expressions inside PHI nodes.
1724       N->setDebugLoc(DebugLoc());
1725     }
1726     return N;
1727   }
1728 
1729   // Otherwise create a new SDValue and remember it.
1730   SDValue Val = getValueImpl(V);
1731   NodeMap[V] = Val;
1732   resolveDanglingDebugInfo(V, Val);
1733   return Val;
1734 }
1735 
1736 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1737 /// Create an SDValue for the given value.
1738 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1739   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1740 
1741   if (const Constant *C = dyn_cast<Constant>(V)) {
1742     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1743 
1744     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1745       return DAG.getConstant(*CI, getCurSDLoc(), VT);
1746 
1747     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1748       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1749 
1750     if (isa<ConstantPointerNull>(C)) {
1751       unsigned AS = V->getType()->getPointerAddressSpace();
1752       return DAG.getConstant(0, getCurSDLoc(),
1753                              TLI.getPointerTy(DAG.getDataLayout(), AS));
1754     }
1755 
1756     if (match(C, m_VScale()))
1757       return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1758 
1759     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1760       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1761 
1762     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1763       return DAG.getUNDEF(VT);
1764 
1765     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1766       visit(CE->getOpcode(), *CE);
1767       SDValue N1 = NodeMap[V];
1768       assert(N1.getNode() && "visit didn't populate the NodeMap!");
1769       return N1;
1770     }
1771 
1772     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1773       SmallVector<SDValue, 4> Constants;
1774       for (const Use &U : C->operands()) {
1775         SDNode *Val = getValue(U).getNode();
1776         // If the operand is an empty aggregate, there are no values.
1777         if (!Val) continue;
1778         // Add each leaf value from the operand to the Constants list
1779         // to form a flattened list of all the values.
1780         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1781           Constants.push_back(SDValue(Val, i));
1782       }
1783 
1784       return DAG.getMergeValues(Constants, getCurSDLoc());
1785     }
1786 
1787     if (const ConstantDataSequential *CDS =
1788           dyn_cast<ConstantDataSequential>(C)) {
1789       SmallVector<SDValue, 4> Ops;
1790       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1791         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1792         // Add each leaf value from the operand to the Constants list
1793         // to form a flattened list of all the values.
1794         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1795           Ops.push_back(SDValue(Val, i));
1796       }
1797 
1798       if (isa<ArrayType>(CDS->getType()))
1799         return DAG.getMergeValues(Ops, getCurSDLoc());
1800       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1801     }
1802 
1803     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1804       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1805              "Unknown struct or array constant!");
1806 
1807       SmallVector<EVT, 4> ValueVTs;
1808       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1809       unsigned NumElts = ValueVTs.size();
1810       if (NumElts == 0)
1811         return SDValue(); // empty struct
1812       SmallVector<SDValue, 4> Constants(NumElts);
1813       for (unsigned i = 0; i != NumElts; ++i) {
1814         EVT EltVT = ValueVTs[i];
1815         if (isa<UndefValue>(C))
1816           Constants[i] = DAG.getUNDEF(EltVT);
1817         else if (EltVT.isFloatingPoint())
1818           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1819         else
1820           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1821       }
1822 
1823       return DAG.getMergeValues(Constants, getCurSDLoc());
1824     }
1825 
1826     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1827       return DAG.getBlockAddress(BA, VT);
1828 
1829     if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1830       return getValue(Equiv->getGlobalValue());
1831 
1832     if (const auto *NC = dyn_cast<NoCFIValue>(C))
1833       return getValue(NC->getGlobalValue());
1834 
1835     if (VT == MVT::aarch64svcount) {
1836       assert(C->isNullValue() && "Can only zero this target type!");
1837       return DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT,
1838                          DAG.getConstant(0, getCurSDLoc(), MVT::nxv16i1));
1839     }
1840 
1841     VectorType *VecTy = cast<VectorType>(V->getType());
1842 
1843     // Now that we know the number and type of the elements, get that number of
1844     // elements into the Ops array based on what kind of constant it is.
1845     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1846       SmallVector<SDValue, 16> Ops;
1847       unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1848       for (unsigned i = 0; i != NumElements; ++i)
1849         Ops.push_back(getValue(CV->getOperand(i)));
1850 
1851       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1852     }
1853 
1854     if (isa<ConstantAggregateZero>(C)) {
1855       EVT EltVT =
1856           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1857 
1858       SDValue Op;
1859       if (EltVT.isFloatingPoint())
1860         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1861       else
1862         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1863 
1864       return NodeMap[V] = DAG.getSplat(VT, getCurSDLoc(), Op);
1865     }
1866 
1867     llvm_unreachable("Unknown vector constant");
1868   }
1869 
1870   // If this is a static alloca, generate it as the frameindex instead of
1871   // computation.
1872   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1873     DenseMap<const AllocaInst*, int>::iterator SI =
1874       FuncInfo.StaticAllocaMap.find(AI);
1875     if (SI != FuncInfo.StaticAllocaMap.end())
1876       return DAG.getFrameIndex(
1877           SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
1878   }
1879 
1880   // If this is an instruction which fast-isel has deferred, select it now.
1881   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1882     Register InReg = FuncInfo.InitializeRegForValue(Inst);
1883 
1884     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1885                      Inst->getType(), std::nullopt);
1886     SDValue Chain = DAG.getEntryNode();
1887     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1888   }
1889 
1890   if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
1891     return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1892 
1893   if (const auto *BB = dyn_cast<BasicBlock>(V))
1894     return DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
1895 
1896   llvm_unreachable("Can't get register for value!");
1897 }
1898 
1899 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1900   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1901   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1902   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1903   bool IsSEH = isAsynchronousEHPersonality(Pers);
1904   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1905   if (!IsSEH)
1906     CatchPadMBB->setIsEHScopeEntry();
1907   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1908   if (IsMSVCCXX || IsCoreCLR)
1909     CatchPadMBB->setIsEHFuncletEntry();
1910 }
1911 
1912 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1913   // Update machine-CFG edge.
1914   MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1915   FuncInfo.MBB->addSuccessor(TargetMBB);
1916   TargetMBB->setIsEHCatchretTarget(true);
1917   DAG.getMachineFunction().setHasEHCatchret(true);
1918 
1919   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1920   bool IsSEH = isAsynchronousEHPersonality(Pers);
1921   if (IsSEH) {
1922     // If this is not a fall-through branch or optimizations are switched off,
1923     // emit the branch.
1924     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1925         TM.getOptLevel() == CodeGenOptLevel::None)
1926       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1927                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1928     return;
1929   }
1930 
1931   // Figure out the funclet membership for the catchret's successor.
1932   // This will be used by the FuncletLayout pass to determine how to order the
1933   // BB's.
1934   // A 'catchret' returns to the outer scope's color.
1935   Value *ParentPad = I.getCatchSwitchParentPad();
1936   const BasicBlock *SuccessorColor;
1937   if (isa<ConstantTokenNone>(ParentPad))
1938     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1939   else
1940     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1941   assert(SuccessorColor && "No parent funclet for catchret!");
1942   MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1943   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1944 
1945   // Create the terminator node.
1946   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1947                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
1948                             DAG.getBasicBlock(SuccessorColorMBB));
1949   DAG.setRoot(Ret);
1950 }
1951 
1952 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1953   // Don't emit any special code for the cleanuppad instruction. It just marks
1954   // the start of an EH scope/funclet.
1955   FuncInfo.MBB->setIsEHScopeEntry();
1956   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1957   if (Pers != EHPersonality::Wasm_CXX) {
1958     FuncInfo.MBB->setIsEHFuncletEntry();
1959     FuncInfo.MBB->setIsCleanupFuncletEntry();
1960   }
1961 }
1962 
1963 // In wasm EH, even though a catchpad may not catch an exception if a tag does
1964 // not match, it is OK to add only the first unwind destination catchpad to the
1965 // successors, because there will be at least one invoke instruction within the
1966 // catch scope that points to the next unwind destination, if one exists, so
1967 // CFGSort cannot mess up the BB sorting order.
1968 // (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
1969 // call within them, and catchpads only consisting of 'catch (...)' have a
1970 // '__cxa_end_catch' call within them, both of which generate invokes in case
1971 // the next unwind destination exists, i.e., the next unwind destination is not
1972 // the caller.)
1973 //
1974 // Having at most one EH pad successor is also simpler and helps later
1975 // transformations.
1976 //
1977 // For example,
1978 // current:
1979 //   invoke void @foo to ... unwind label %catch.dispatch
1980 // catch.dispatch:
1981 //   %0 = catchswitch within ... [label %catch.start] unwind label %next
1982 // catch.start:
1983 //   ...
1984 //   ... in this BB or some other child BB dominated by this BB there will be an
1985 //   invoke that points to 'next' BB as an unwind destination
1986 //
1987 // next: ; We don't need to add this to 'current' BB's successor
1988 //   ...
1989 static void findWasmUnwindDestinations(
1990     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1991     BranchProbability Prob,
1992     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1993         &UnwindDests) {
1994   while (EHPadBB) {
1995     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1996     if (isa<CleanupPadInst>(Pad)) {
1997       // Stop on cleanup pads.
1998       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1999       UnwindDests.back().first->setIsEHScopeEntry();
2000       break;
2001     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2002       // Add the catchpad handlers to the possible destinations. We don't
2003       // continue to the unwind destination of the catchswitch for wasm.
2004       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2005         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
2006         UnwindDests.back().first->setIsEHScopeEntry();
2007       }
2008       break;
2009     } else {
2010       continue;
2011     }
2012   }
2013 }
2014 
2015 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
2016 /// many places it could ultimately go. In the IR, we have a single unwind
2017 /// destination, but in the machine CFG, we enumerate all the possible blocks.
2018 /// This function skips over imaginary basic blocks that hold catchswitch
2019 /// instructions, and finds all the "real" machine
2020 /// basic block destinations. As those destinations may not be successors of
2021 /// EHPadBB, here we also calculate the edge probability to those destinations.
2022 /// The passed-in Prob is the edge probability to EHPadBB.
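     /// For example (illustrative IR): given
     ///   catch.dispatch:
     ///     %cs = catchswitch within none [label %catch.start] unwind label %cleanup
     /// an edge unwinding to %catch.dispatch records %catch.start as a
     /// destination and, scaled by the catchswitch's own unwind probability,
     /// continues into %cleanup.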
2023 static void findUnwindDestinations(
2024     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2025     BranchProbability Prob,
2026     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2027         &UnwindDests) {
2028   EHPersonality Personality =
2029     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2030   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2031   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2032   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2033   bool IsSEH = isAsynchronousEHPersonality(Personality);
2034 
2035   if (IsWasmCXX) {
2036     findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
2037     assert(UnwindDests.size() <= 1 &&
2038            "There should be at most one unwind destination for wasm");
2039     return;
2040   }
2041 
2042   while (EHPadBB) {
2043     const Instruction *Pad = EHPadBB->getFirstNonPHI();
2044     BasicBlock *NewEHPadBB = nullptr;
2045     if (isa<LandingPadInst>(Pad)) {
2046       // Stop on landingpads. They are not funclets.
2047       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
2048       break;
2049     } else if (isa<CleanupPadInst>(Pad)) {
2050       // Stop on cleanup pads. Cleanups are always funclet entries for all known
2051       // personalities.
2052       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
2053       UnwindDests.back().first->setIsEHScopeEntry();
2054       UnwindDests.back().first->setIsEHFuncletEntry();
2055       break;
2056     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2057       // Add the catchpad handlers to the possible destinations.
2058       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2059         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
2060         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2061         if (IsMSVCCXX || IsCoreCLR)
2062           UnwindDests.back().first->setIsEHFuncletEntry();
2063         if (!IsSEH)
2064           UnwindDests.back().first->setIsEHScopeEntry();
2065       }
2066       NewEHPadBB = CatchSwitch->getUnwindDest();
2067     } else {
2068       continue;
2069     }
2070 
2071     BranchProbabilityInfo *BPI = FuncInfo.BPI;
2072     if (BPI && NewEHPadBB)
2073       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2074     EHPadBB = NewEHPadBB;
2075   }
2076 }
2077 
2078 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
2079   // Update successor info.
2080   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2081   auto UnwindDest = I.getUnwindDest();
2082   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2083   BranchProbability UnwindDestProb =
2084       (BPI && UnwindDest)
2085           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
2086           : BranchProbability::getZero();
2087   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
2088   for (auto &UnwindDest : UnwindDests) {
2089     UnwindDest.first->setIsEHPad();
2090     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2091   }
2092   FuncInfo.MBB->normalizeSuccProbs();
2093 
2094   // Create the terminator node.
2095   SDValue Ret =
2096       DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
2097   DAG.setRoot(Ret);
2098 }
2099 
2100 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
2101   report_fatal_error("visitCatchSwitch not yet implemented!");
2102 }
2103 
2104 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
2105   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2106   auto &DL = DAG.getDataLayout();
2107   SDValue Chain = getControlRoot();
2108   SmallVector<ISD::OutputArg, 8> Outs;
2109   SmallVector<SDValue, 8> OutVals;
2110 
2111   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
2112   // lower
2113   //
2114   //   %val = call <ty> @llvm.experimental.deoptimize()
2115   //   ret <ty> %val
2116   //
2117   // differently.
2118   if (I.getParent()->getTerminatingDeoptimizeCall()) {
2119     LowerDeoptimizingReturn();
2120     return;
2121   }
2122 
2123   if (!FuncInfo.CanLowerReturn) {
2124     unsigned DemoteReg = FuncInfo.DemoteRegister;
2125     const Function *F = I.getParent()->getParent();
2126 
2127     // Emit a store of the return value through the virtual register.
2128     // Leave Outs empty so that LowerReturn won't try to load return
2129     // registers the usual way.
2130     SmallVector<EVT, 1> PtrValueVTs;
2131     ComputeValueVTs(TLI, DL,
2132                     PointerType::get(F->getContext(),
2133                                      DAG.getDataLayout().getAllocaAddrSpace()),
2134                     PtrValueVTs);
2135 
2136     SDValue RetPtr =
2137         DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVTs[0]);
2138     SDValue RetOp = getValue(I.getOperand(0));
2139 
2140     SmallVector<EVT, 4> ValueVTs, MemVTs;
2141     SmallVector<uint64_t, 4> Offsets;
2142     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2143                     &Offsets, 0);
2144     unsigned NumValues = ValueVTs.size();
2145 
2146     SmallVector<SDValue, 4> Chains(NumValues);
2147     Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2148     for (unsigned i = 0; i != NumValues; ++i) {
2149       // An aggregate return value cannot wrap around the address space, so
2150       // offsets to its parts don't wrap either.
2151       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
2152                                            TypeSize::getFixed(Offsets[i]));
2153 
2154       SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
2155       if (MemVTs[i] != ValueVTs[i])
2156         Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
2157       Chains[i] = DAG.getStore(
2158           Chain, getCurSDLoc(), Val,
2159           // FIXME: better loc info would be nice.
2160           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
2161           commonAlignment(BaseAlign, Offsets[i]));
2162     }
2163 
2164     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
2165                         MVT::Other, Chains);
2166   } else if (I.getNumOperands() != 0) {
2167     SmallVector<EVT, 4> ValueVTs;
2168     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
2169     unsigned NumValues = ValueVTs.size();
2170     if (NumValues) {
2171       SDValue RetOp = getValue(I.getOperand(0));
2172 
2173       const Function *F = I.getParent()->getParent();
2174 
2175       bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
2176           I.getOperand(0)->getType(), F->getCallingConv(),
2177           /*IsVarArg*/ false, DL);
2178 
2179       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2180       if (F->getAttributes().hasRetAttr(Attribute::SExt))
2181         ExtendKind = ISD::SIGN_EXTEND;
2182       else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2183         ExtendKind = ISD::ZERO_EXTEND;
2184 
2185       LLVMContext &Context = F->getContext();
2186       bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2187 
2188       for (unsigned j = 0; j != NumValues; ++j) {
2189         EVT VT = ValueVTs[j];
2190 
2191         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2192           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
2193 
2194         CallingConv::ID CC = F->getCallingConv();
2195 
2196         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
2197         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
2198         SmallVector<SDValue, 4> Parts(NumParts);
2199         getCopyToParts(DAG, getCurSDLoc(),
2200                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
2201                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
2202 
2203         // 'inreg' on function refers to return value
2204         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2205         if (RetInReg)
2206           Flags.setInReg();
2207 
2208         if (I.getOperand(0)->getType()->isPointerTy()) {
2209           Flags.setPointer();
2210           Flags.setPointerAddrSpace(
2211               cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2212         }
2213 
2214         if (NeedsRegBlock) {
2215           Flags.setInConsecutiveRegs();
2216           if (j == NumValues - 1)
2217             Flags.setInConsecutiveRegsLast();
2218         }
2219 
2220         // Propagate extension type if any
2221         if (ExtendKind == ISD::SIGN_EXTEND)
2222           Flags.setSExt();
2223         else if (ExtendKind == ISD::ZERO_EXTEND)
2224           Flags.setZExt();
2225 
2226         for (unsigned i = 0; i < NumParts; ++i) {
2227           Outs.push_back(ISD::OutputArg(Flags,
2228                                         Parts[i].getValueType().getSimpleVT(),
2229                                         VT, /*isfixed=*/true, 0, 0));
2230           OutVals.push_back(Parts[i]);
2231         }
2232       }
2233     }
2234   }
2235 
2236   // Push the swifterror virtual register as the last element of Outs. This
2237   // makes sure the swifterror virtual register will be returned in the
2238   // swifterror physical register.
2239   const Function *F = I.getParent()->getParent();
2240   if (TLI.supportSwiftError() &&
2241       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2242     assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2243     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2244     Flags.setSwiftError();
2245     Outs.push_back(ISD::OutputArg(
2246         Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
2247         /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
2248     // Create SDNode for the swifterror virtual register.
2249     OutVals.push_back(
2250         DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2251                             &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2252                         EVT(TLI.getPointerTy(DL))));
2253   }
2254 
2255   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2256   CallingConv::ID CallConv =
2257     DAG.getMachineFunction().getFunction().getCallingConv();
2258   Chain = DAG.getTargetLoweringInfo().LowerReturn(
2259       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2260 
2261   // Verify that the target's LowerReturn behaved as expected.
2262   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2263          "LowerReturn didn't return a valid chain!");
2264 
2265   // Update the DAG with the new chain value resulting from return lowering.
2266   DAG.setRoot(Chain);
2267 }
2268 
2269 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
2270 /// created for it, emit nodes to copy the value into the virtual
2271 /// registers.
2272 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
2273   // Skip empty types
2274   if (V->getType()->isEmptyTy())
2275     return;
2276 
2277   DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
2278   if (VMI != FuncInfo.ValueMap.end()) {
2279     assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2280            "Unused value assigned virtual registers!");
2281     CopyValueToVirtualRegister(V, VMI->second);
2282   }
2283 }
2284 
2285 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
2286 /// the current basic block, add it to ValueMap now so that we'll get a
2287 /// CopyTo/FromReg.
2288 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
2289   // No need to export constants.
2290   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2291 
2292   // Already exported?
2293   if (FuncInfo.isExportedInst(V)) return;
2294 
2295   Register Reg = FuncInfo.InitializeRegForValue(V);
2296   CopyValueToVirtualRegister(V, Reg);
2297 }
2298 
2299 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
2300                                                      const BasicBlock *FromBB) {
2301   // The operands of the setcc have to be in this block.  We don't know
2302   // how to export them from some other block.
2303   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2304     // Can export from current BB.
2305     if (VI->getParent() == FromBB)
2306       return true;
2307 
2308     // Is already exported, noop.
2309     return FuncInfo.isExportedInst(V);
2310   }
2311 
2312   // If this is an argument, we can export it if the BB is the entry block or
2313   // if it is already exported.
2314   if (isa<Argument>(V)) {
2315     if (FromBB->isEntryBlock())
2316       return true;
2317 
2318     // Otherwise, can only export this if it is already exported.
2319     return FuncInfo.isExportedInst(V);
2320   }
2321 
2322   // Otherwise, constants can always be exported.
2323   return true;
2324 }
2325 
2326 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2327 BranchProbability
2328 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2329                                         const MachineBasicBlock *Dst) const {
2330   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2331   const BasicBlock *SrcBB = Src->getBasicBlock();
2332   const BasicBlock *DstBB = Dst->getBasicBlock();
2333   if (!BPI) {
2334     // If BPI is not available, set the default probability as 1 / N, where N is
2335     // the number of successors.
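         // (E.g., a terminator with four successors gets 1/4 on each edge.)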
2336     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2337     return BranchProbability(1, SuccSize);
2338   }
2339   return BPI->getEdgeProbability(SrcBB, DstBB);
2340 }
2341 
2342 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2343                                                MachineBasicBlock *Dst,
2344                                                BranchProbability Prob) {
2345   if (!FuncInfo.BPI)
2346     Src->addSuccessorWithoutProb(Dst);
2347   else {
2348     if (Prob.isUnknown())
2349       Prob = getEdgeProbability(Src, Dst);
2350     Src->addSuccessor(Dst, Prob);
2351   }
2352 }
2353 
2354 static bool InBlock(const Value *V, const BasicBlock *BB) {
2355   if (const Instruction *I = dyn_cast<Instruction>(V))
2356     return I->getParent() == BB;
2357   return true;
2358 }
2359 
2360 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2361 /// This function emits a branch and is used at the leaves of an OR or an
2362 /// AND operator tree.
2363 void
2364 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2365                                                   MachineBasicBlock *TBB,
2366                                                   MachineBasicBlock *FBB,
2367                                                   MachineBasicBlock *CurBB,
2368                                                   MachineBasicBlock *SwitchBB,
2369                                                   BranchProbability TProb,
2370                                                   BranchProbability FProb,
2371                                                   bool InvertCond) {
2372   const BasicBlock *BB = CurBB->getBasicBlock();
2373 
2374   // If the leaf of the tree is a comparison, merge the condition into
2375   // the caseblock.
2376   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2377     // The operands of the cmp have to be in this block.  We don't know
2378     // how to export them from some other block.  If this is the first block
2379     // of the sequence, no exporting is needed.
2380     if (CurBB == SwitchBB ||
2381         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2382          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2383       ISD::CondCode Condition;
2384       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2385         ICmpInst::Predicate Pred =
2386             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2387         Condition = getICmpCondCode(Pred);
2388       } else {
2389         const FCmpInst *FC = cast<FCmpInst>(Cond);
2390         FCmpInst::Predicate Pred =
2391             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2392         Condition = getFCmpCondCode(Pred);
2393         if (TM.Options.NoNaNsFPMath)
2394           Condition = getFCmpCodeWithoutNaN(Condition);
2395       }
2396 
2397       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2398                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2399       SL->SwitchCases.push_back(CB);
2400       return;
2401     }
2402   }
2403 
2404   // Create a CaseBlock record representing this branch.
2405   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2406   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2407                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2408   SL->SwitchCases.push_back(CB);
2409 }
2410 
2411 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2412                                                MachineBasicBlock *TBB,
2413                                                MachineBasicBlock *FBB,
2414                                                MachineBasicBlock *CurBB,
2415                                                MachineBasicBlock *SwitchBB,
2416                                                Instruction::BinaryOps Opc,
2417                                                BranchProbability TProb,
2418                                                BranchProbability FProb,
2419                                                bool InvertCond) {
2420   // Skip over a 'not' that is not itself part of the tree, and remember to
2421   // invert the op and its operands at the next level.
2422   Value *NotCond;
2423   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2424       InBlock(NotCond, CurBB->getBasicBlock())) {
2425     FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2426                          !InvertCond);
2427     return;
2428   }
2429 
2430   const Instruction *BOp = dyn_cast<Instruction>(Cond);
2431   const Value *BOpOp0, *BOpOp1;
2432   // Compute the effective opcode for Cond, taking into account whether it needs
2433   // to be inverted, e.g.
2434   //   and (not (or A, B)), C
2435   // gets lowered as
2436   //   and (and (not A, not B), C)
2437   Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2438   if (BOp) {
2439     BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2440                ? Instruction::And
2441                : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2442                       ? Instruction::Or
2443                       : (Instruction::BinaryOps)0);
2444     if (InvertCond) {
2445       if (BOpc == Instruction::And)
2446         BOpc = Instruction::Or;
2447       else if (BOpc == Instruction::Or)
2448         BOpc = Instruction::And;
2449     }
2450   }
2451 
2452   // If this node is not part of the or/and tree, emit it as a branch.
2453   // Note that all nodes in the tree should have the same opcode.
2454   bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2455   if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2456       !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2457       !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2458     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2459                                  TProb, FProb, InvertCond);
2460     return;
2461   }
2462 
2463   //  Create TmpBB after CurBB.
2464   MachineFunction::iterator BBI(CurBB);
2465   MachineFunction &MF = DAG.getMachineFunction();
2466   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2467   CurBB->getParent()->insert(++BBI, TmpBB);
2468 
2469   if (Opc == Instruction::Or) {
2470     // Codegen X | Y as:
2471     // BB1:
2472     //   jmp_if_X TBB
2473     //   jmp TmpBB
2474     // TmpBB:
2475     //   jmp_if_Y TBB
2476     //   jmp FBB
2477     //
2478 
2479     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2480     // The requirement is that
2481     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2482     //     = TrueProb for original BB.
2483     // Assuming the original probabilities are A and B, one choice is to set
2484     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2485     // A/(1+B) and 2B/(1+B). This choice assumes that
2486     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2487     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2488     // Another choice is to assume TrueProb for BB1 equals TrueProb for
2489     // TmpBB, but the math is more complicated.
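         // Worked example (assumed inputs): with A = B = 1/2, BB1 gets
         // {1/4, 3/4}; TmpBB, after normalizing {1/4, 1/2}, gets {1/3, 2/3},
         // matching A/(1+B) = 1/3 and 2B/(1+B) = 2/3.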
2490     auto NewTrueProb = TProb / 2;
2491     auto NewFalseProb = TProb / 2 + FProb;
2492     // Emit the LHS condition.
2493     FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2494                          NewFalseProb, InvertCond);
2495 
2496     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2497     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2498     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2499     // Emit the RHS condition into TmpBB.
2500     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2501                          Probs[1], InvertCond);
2502   } else {
2503     assert(Opc == Instruction::And && "Unknown merge op!");
2504     // Codegen X & Y as:
2505     // BB1:
2506     //   jmp_if_X TmpBB
2507     //   jmp FBB
2508     // TmpBB:
2509     //   jmp_if_Y TBB
2510     //   jmp FBB
2511     //
2512     //  This requires creation of TmpBB after CurBB.
2513 
2514     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2515     // The requirement is that
2516     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2517     //     = FalseProb for original BB.
2518     // Assuming the original probabilities are A and B, one choice is to set
2519     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2520     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2521     // TrueProb for BB1 * FalseProb for TmpBB.
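         // As an illustrative check of the math above, with original
         // probabilities A = B = 1/2, BB1 gets {3/4, 1/4} and TmpBB gets
         // {2/3, 1/3}; the requirement holds: 1/4 + 3/4 * 1/3 = 1/2 = B.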
2522 
2523     auto NewTrueProb = TProb + FProb / 2;
2524     auto NewFalseProb = FProb / 2;
2525     // Emit the LHS condition.
2526     FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2527                          NewFalseProb, InvertCond);
2528 
2529     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2530     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2531     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2532     // Emit the RHS condition into TmpBB.
2533     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2534                          Probs[1], InvertCond);
2535   }
2536 }
2537 
2538 /// If the set of cases should be emitted as a series of branches, return true.
2539 /// If we should emit this as a bunch of and/or'd together conditions, return
2540 /// false.
2541 bool
2542 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2543   if (Cases.size() != 2) return true;
2544 
2545   // If this is two comparisons of the same values or'd or and'd together, they
2546   // will get folded into a single comparison, so don't emit two blocks.
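       // E.g., for (X < Y) || (X == Y), both case blocks compare X and Y, and
       // DAG combining can merge the two setccs into a single X <= Y.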
2547   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2548        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2549       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2550        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2551     return false;
2552   }
2553 
2554   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2555   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2556   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2557       Cases[0].CC == Cases[1].CC &&
2558       isa<Constant>(Cases[0].CmpRHS) &&
2559       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2560     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2561       return false;
2562     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2563       return false;
2564   }
2565 
2566   return true;
2567 }
2568 
2569 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2570   MachineBasicBlock *BrMBB = FuncInfo.MBB;
2571 
2572   // Update machine-CFG edges.
2573   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2574 
2575   if (I.isUnconditional()) {
2576     // Update machine-CFG edges.
2577     BrMBB->addSuccessor(Succ0MBB);
2578 
2579     // If this is not a fall-through branch or optimizations are switched off,
2580     // emit the branch.
2581     if (Succ0MBB != NextBlock(BrMBB) ||
2582         TM.getOptLevel() == CodeGenOptLevel::None) {
2583       auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2584                             getControlRoot(), DAG.getBasicBlock(Succ0MBB));
2585       setValue(&I, Br);
2586       DAG.setRoot(Br);
2587     }
2588 
2589     return;
2590   }
2591 
2592   // If this condition is one of the special cases we handle, do special stuff
2593   // now.
2594   const Value *CondVal = I.getCondition();
2595   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2596 
2597   // If this is a series of conditions that are or'd or and'd together, emit
2598   // this as a sequence of branches instead of setcc's with and/or operations.
2599   // As long as jumps are not expensive (with exceptions for multi-use logic
2600   // ops, unpredictable branches, and vector extracts, whose jumps are likely
2601   // expensive for any target), this should improve performance.
2602   // For example, instead of something like:
2603   //     cmp A, B
2604   //     C = seteq
2605   //     cmp D, E
2606   //     F = setle
2607   //     or C, F
2608   //     jnz foo
2609   // Emit:
2610   //     cmp A, B
2611   //     je foo
2612   //     cmp D, E
2613   //     jle foo
2614   const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2615   if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2616       BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
2617     Value *Vec;
2618     const Value *BOp0, *BOp1;
2619     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2620     if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2621       Opcode = Instruction::And;
2622     else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2623       Opcode = Instruction::Or;
2624 
2625     if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2626                     match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
2627       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2628                            getEdgeProbability(BrMBB, Succ0MBB),
2629                            getEdgeProbability(BrMBB, Succ1MBB),
2630                            /*InvertCond=*/false);
2631       // If the compares in later blocks need to use values not currently
2632       // exported from this block, export them now.  This block should always
2633       // be the first entry.
2634       assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2635 
2636       // Allow some cases to be rejected.
2637       if (ShouldEmitAsBranches(SL->SwitchCases)) {
2638         for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2639           ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2640           ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2641         }
2642 
2643         // Emit the branch for this block.
2644         visitSwitchCase(SL->SwitchCases[0], BrMBB);
2645         SL->SwitchCases.erase(SL->SwitchCases.begin());
2646         return;
2647       }
2648 
2649       // Okay, we decided not to do this, remove any inserted MBB's and clear
2650       // SwitchCases.
2651       for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2652         FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2653 
2654       SL->SwitchCases.clear();
2655     }
2656   }
2657 
2658   // Create a CaseBlock record representing this branch.
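       // I.e., a plain 'br i1 %cond, label %t, label %f' becomes the compare
       // (%cond == true) with TrueBB = Succ0MBB and FalseBB = Succ1MBB;
       // visitSwitchCase folds the compare back to %cond.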
2659   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2660                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2661 
2662   // Use visitSwitchCase to actually insert the fast branch sequence for this
2663   // cond branch.
2664   visitSwitchCase(CB, BrMBB);
2665 }
2666 
2667 /// visitSwitchCase - Emits the necessary code to represent a single node in
2668 /// the binary search tree resulting from lowering a switch instruction.
2669 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2670                                           MachineBasicBlock *SwitchBB) {
2671   SDValue Cond;
2672   SDValue CondLHS = getValue(CB.CmpLHS);
2673   SDLoc dl = CB.DL;
2674 
2675   if (CB.CC == ISD::SETTRUE) {
2676     // Branch or fall through to TrueBB.
2677     addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2678     SwitchBB->normalizeSuccProbs();
2679     if (CB.TrueBB != NextBlock(SwitchBB)) {
2680       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2681                               DAG.getBasicBlock(CB.TrueBB)));
2682     }
2683     return;
2684   }
2685 
2686   auto &TLI = DAG.getTargetLoweringInfo();
2687   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2688 
2689   // Build the setcc now.
2690   if (!CB.CmpMHS) {
2691     // Fold "(X == true)" to X and "(X == false)" to !X to
2692     // handle common cases produced by branch lowering.
2693     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2694         CB.CC == ISD::SETEQ)
2695       Cond = CondLHS;
2696     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2697              CB.CC == ISD::SETEQ) {
2698       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2699       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2700     } else {
2701       SDValue CondRHS = getValue(CB.CmpRHS);
2702 
2703       // If a pointer's DAG type is larger than its memory type then the DAG
2704       // values are zero-extended. This breaks signed comparisons so truncate
2705       // back to the underlying type before doing the compare.
2706       if (CondLHS.getValueType() != MemVT) {
2707         CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2708         CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2709       }
2710       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2711     }
2712   } else {
2713     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2714 
2715     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2716     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2717 
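         // A case range is checked with one unsigned compare; e.g., cases 5..9
         // (Low = 5, High = 9) become (CmpMHS - 5) ule 4. When Low is the
         // signed minimum, the subtraction is skipped and 'sle High' suffices.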
2718     SDValue CmpOp = getValue(CB.CmpMHS);
2719     EVT VT = CmpOp.getValueType();
2720 
2721     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2722       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2723                           ISD::SETLE);
2724     } else {
2725       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2726                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2727       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2728                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2729     }
2730   }
2731 
2732   // Update successor info
2733   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2734   // TrueBB and FalseBB are always different unless the incoming IR is
2735   // degenerate. This only happens when running llc on weird IR.
2736   if (CB.TrueBB != CB.FalseBB)
2737     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2738   SwitchBB->normalizeSuccProbs();
2739 
2740   // If the lhs block is the next block, invert the condition so that we can
2741   // fall through to the lhs instead of the rhs block.
2742   if (CB.TrueBB == NextBlock(SwitchBB)) {
2743     std::swap(CB.TrueBB, CB.FalseBB);
2744     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2745     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2746   }
2747 
2748   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2749                                MVT::Other, getControlRoot(), Cond,
2750                                DAG.getBasicBlock(CB.TrueBB));
2751 
2752   setValue(CurInst, BrCond);
2753 
2754   // Insert the false branch. Do this even if it's a fall-through branch;
2755   // this makes it easier to do DAG optimizations which require inverting
2756   // the branch condition.
2757   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2758                        DAG.getBasicBlock(CB.FalseBB));
2759 
2760   DAG.setRoot(BrCond);
2761 }
2762 
2763 /// visitJumpTable - Emit JumpTable node in the current MBB
2764 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2765   // Emit the code for the jump table
2766   assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2767   assert(JT.Reg != -1U && "Should lower JT Header first!");
2768   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2769   SDValue Index = DAG.getCopyFromReg(getControlRoot(), *JT.SL, JT.Reg, PTy);
2770   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2771   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
2772                                     Index.getValue(1), Table, Index);
2773   DAG.setRoot(BrJumpTable);
2774 }
2775 
2776 /// visitJumpTableHeader - This function emits the necessary code to produce
2777 /// the index into the jump table from the switch case value.
2778 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2779                                                JumpTableHeader &JTH,
2780                                                MachineBasicBlock *SwitchBB) {
2781   assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2782   const SDLoc &dl = *JT.SL;
2783 
2784   // Subtract the lowest switch case value from the value being switched on.
2785   SDValue SwitchOp = getValue(JTH.SValue);
2786   EVT VT = SwitchOp.getValueType();
2787   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2788                             DAG.getConstant(JTH.First, dl, VT));
2789 
2790   // The SDNode we just created, which holds the value being switched on minus
2791   // the smallest case value, needs to be copied to a virtual register so it
2792   // can be used as an index into the jump table in a subsequent basic block.
2793   // This value may be smaller or larger than the target's pointer type, and
2794   // may therefore require extension or truncation.
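       // E.g., for a table over cases 10..14, switching on 13 yields Sub = 3,
       // which is zero-extended or truncated to pointer width for the index;
       // the range check below (when emitted) compares Sub against 14 - 10 = 4.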
2795   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2796   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2797 
2798   unsigned JumpTableReg =
2799       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2800   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2801                                     JumpTableReg, SwitchOp);
2802   JT.Reg = JumpTableReg;
2803 
2804   if (!JTH.FallthroughUnreachable) {
2805     // Emit the range check for the jump table, and branch to the default block
2806     // for the switch statement if the value being switched on exceeds the
2807     // largest case in the switch.
2808     SDValue CMP = DAG.getSetCC(
2809         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2810                                    Sub.getValueType()),
2811         Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2812 
2813     SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2814                                  MVT::Other, CopyTo, CMP,
2815                                  DAG.getBasicBlock(JT.Default));
2816 
2817     // Avoid emitting unnecessary branches to the next block.
2818     if (JT.MBB != NextBlock(SwitchBB))
2819       BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2820                            DAG.getBasicBlock(JT.MBB));
2821 
2822     DAG.setRoot(BrCond);
2823   } else {
2824     // Avoid emitting unnecessary branches to the next block.
2825     if (JT.MBB != NextBlock(SwitchBB))
2826       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2827                               DAG.getBasicBlock(JT.MBB)));
2828     else
2829       DAG.setRoot(CopyTo);
2830   }
2831 }
2832 
2833 /// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
2834 /// variable if one exists.
2835 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2836                                  SDValue &Chain) {
2837   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2838   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2839   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2840   MachineFunction &MF = DAG.getMachineFunction();
2841   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2842   MachineSDNode *Node =
2843       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2844   if (Global) {
2845     MachinePointerInfo MPInfo(Global);
2846     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2847                  MachineMemOperand::MODereferenceable;
2848     MachineMemOperand *MemRef = MF.getMachineMemOperand(
2849         MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
2850     DAG.setNodeMemRefs(Node, {MemRef});
2851   }
2852   if (PtrTy != PtrMemTy)
2853     return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2854   return SDValue(Node, 0);
2855 }
2856 
2857 /// Codegen a new tail for a stack protector check ParentMBB which has had its
2858 /// tail spliced into a stack protector check success bb.
2859 ///
2860 /// For a high level explanation of how this fits into the stack protector
2861 /// generation see the comment on the declaration of class
2862 /// StackProtectorDescriptor.
2863 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2864                                                   MachineBasicBlock *ParentBB) {
2865 
2866   // First create the loads from the guard and the stack slot for the comparison.
2867   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2868   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2869   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2870 
2871   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2872   int FI = MFI.getStackProtectorIndex();
2873 
2874   SDValue Guard;
2875   SDLoc dl = getCurSDLoc();
2876   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2877   const Module &M = *ParentBB->getParent()->getFunction().getParent();
2878   Align Align =
2879       DAG.getDataLayout().getPrefTypeAlign(PointerType::get(M.getContext(), 0));
2880 
2881   // Generate code to load the content of the guard slot.
2882   SDValue GuardVal = DAG.getLoad(
2883       PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2884       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2885       MachineMemOperand::MOVolatile);
2886 
2887   if (TLI.useStackGuardXorFP())
2888     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2889 
2890   // Retrieve the guard check function; nullptr means instrumentation is inlined.
2891   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2892     // The target provides a guard check function to validate the guard value.
2893     // Generate a call to that function with the content of the guard slot as
2894     // argument.
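         // (For example, MSVC environments use __security_check_cookie, which
         // expects the cookie in a register rather than on the stack.)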
2895     FunctionType *FnTy = GuardCheckFn->getFunctionType();
2896     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2897 
2898     TargetLowering::ArgListTy Args;
2899     TargetLowering::ArgListEntry Entry;
2900     Entry.Node = GuardVal;
2901     Entry.Ty = FnTy->getParamType(0);
2902     if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
2903       Entry.IsInReg = true;
2904     Args.push_back(Entry);
2905 
2906     TargetLowering::CallLoweringInfo CLI(DAG);
2907     CLI.setDebugLoc(getCurSDLoc())
2908         .setChain(DAG.getEntryNode())
2909         .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2910                    getValue(GuardCheckFn), std::move(Args));
2911 
2912     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2913     DAG.setRoot(Result.second);
2914     return;
2915   }
2916 
2917   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2918   // Otherwise, emit a volatile load to retrieve the stack guard value.
2919   SDValue Chain = DAG.getEntryNode();
2920   if (TLI.useLoadStackGuardNode()) {
2921     Guard = getLoadStackGuard(DAG, dl, Chain);
2922   } else {
2923     const Value *IRGuard = TLI.getSDagStackGuard(M);
2924     SDValue GuardPtr = getValue(IRGuard);
2925 
2926     Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2927                         MachinePointerInfo(IRGuard, 0), Align,
2928                         MachineMemOperand::MOVolatile);
2929   }
2930 
2931   // Perform the comparison via getSetCC.
2932   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2933                                                         *DAG.getContext(),
2934                                                         Guard.getValueType()),
2935                              Guard, GuardVal, ISD::SETNE);
2936 
2937   // If the guard and the stack slot are not equal, branch to the failure MBB.
2938   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2939                                MVT::Other, GuardVal.getOperand(0),
2940                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2941   // Otherwise branch to success MBB.
2942   SDValue Br = DAG.getNode(ISD::BR, dl,
2943                            MVT::Other, BrCond,
2944                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2945 
2946   DAG.setRoot(Br);
2947 }
2948 
2949 /// Codegen the failure basic block for a stack protector check.
2950 ///
2951 /// A failure stack protector machine basic block consists simply of a call to
2952 /// __stack_chk_fail().
2953 ///
2954 /// For a high level explanation of how this fits into the stack protector
2955 /// generation see the comment on the declaration of class
2956 /// StackProtectorDescriptor.
2957 void
2958 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2959   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2960   TargetLowering::MakeLibCallOptions CallOptions;
2961   CallOptions.setDiscardResult(true);
2962   SDValue Chain =
2963       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2964                       std::nullopt, CallOptions, getCurSDLoc())
2965           .second;
2966   // On PS4/PS5, the "return address" must still be within the calling
2967   // function, even if it's at the very end, so emit an explicit TRAP here.
2968   // Passing 'true' for doesNotReturn above won't generate the trap for us.
2969   if (TM.getTargetTriple().isPS())
2970     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2971   // WebAssembly needs an unreachable instruction after a non-returning call,
2972   // because the function return type can be different from __stack_chk_fail's
2973   // return type (void).
2974   if (TM.getTargetTriple().isWasm())
2975     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2976 
2977   DAG.setRoot(Chain);
2978 }
2979 
2980 /// visitBitTestHeader - This function emits the necessary code to produce a
2981 /// value suitable for "bit tests".
2982 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2983                                              MachineBasicBlock *SwitchBB) {
2984   SDLoc dl = getCurSDLoc();
2985 
2986   // Subtract the minimum value.
2987   SDValue SwitchOp = getValue(B.SValue);
2988   EVT VT = SwitchOp.getValueType();
2989   SDValue RangeSub =
2990       DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
2991 
2992   // Determine the type of the test operands.
2993   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2994   bool UsePtrType = false;
2995   if (!TLI.isTypeLegal(VT)) {
2996     UsePtrType = true;
2997   } else {
2998     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2999       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
3000         // Switch table case ranges are encoded into a series of masks.
3001         // Just use the pointer type; it's guaranteed to fit.
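             // E.g., a 64-bit mask with bit 40 set cannot be tested in an
             // i32-typed value.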
3002         UsePtrType = true;
3003         break;
3004       }
3005   }
3006   SDValue Sub = RangeSub;
3007   if (UsePtrType) {
3008     VT = TLI.getPointerTy(DAG.getDataLayout());
3009     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
3010   }
3011 
3012   B.RegVT = VT.getSimpleVT();
3013   B.Reg = FuncInfo.CreateReg(B.RegVT);
3014   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
3015 
3016   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
3017 
3018   if (!B.FallthroughUnreachable)
3019     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
3020   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
3021   SwitchBB->normalizeSuccProbs();
3022 
3023   SDValue Root = CopyTo;
3024   if (!B.FallthroughUnreachable) {
3025     // Conditional branch to the default block.
3026     SDValue RangeCmp = DAG.getSetCC(dl,
3027         TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
3028                                RangeSub.getValueType()),
3029         RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
3030         ISD::SETUGT);
3031 
3032     Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3033                        DAG.getBasicBlock(B.Default));
3034   }
3035 
3036   // Avoid emitting unnecessary branches to the next block.
3037   if (MBB != NextBlock(SwitchBB))
3038     Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
3039 
3040   DAG.setRoot(Root);
3041 }
3042 
3043 /// visitBitTestCase - This function produces one "bit test".
3044 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
3045                                            MachineBasicBlock* NextMBB,
3046                                            BranchProbability BranchProbToNext,
3047                                            unsigned Reg,
3048                                            BitTestCase &B,
3049                                            MachineBasicBlock *SwitchBB) {
3050   SDLoc dl = getCurSDLoc();
3051   MVT VT = BB.RegVT;
3052   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
3053   SDValue Cmp;
3054   unsigned PopCount = llvm::popcount(B.Mask);
3055   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3056   if (PopCount == 1) {
3057     // Testing for a single bit; just compare the shift count with what it
3058     // would need to be to shift a 1 into that position.
3059     Cmp = DAG.getSetCC(
3060         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3061         ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
3062         ISD::SETEQ);
3063   } else if (PopCount == BB.Range) {
3064     // There is only one zero bit in the range, test for it directly.
3065     Cmp = DAG.getSetCC(
3066         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3067         ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
3068   } else {
3069     // Make desired shift
3070     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
3071                                     DAG.getConstant(1, dl, VT), ShiftOp);
3072 
3073     // Emit bit tests and jumps
3074     SDValue AndOp = DAG.getNode(ISD::AND, dl,
3075                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
3076     Cmp = DAG.getSetCC(
3077         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3078         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
3079   }
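       // E.g., for Mask = 0b0110 within a range of 4, the general form above
       // tests ((1 << X) & 0b0110) != 0, covering cases 1 and 2 in one branch.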
3080 
3081   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
3082   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
3083   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
3084   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3085   // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
3086   // one, as they are relative probabilities (and thus work more like weights),
3087   // so we need to normalize them so that they sum to one.
3088   SwitchBB->normalizeSuccProbs();
3089 
3090   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
3091                               MVT::Other, getControlRoot(),
3092                               Cmp, DAG.getBasicBlock(B.TargetBB));
3093 
3094   // Avoid emitting unnecessary branches to the next block.
3095   if (NextMBB != NextBlock(SwitchBB))
3096     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3097                         DAG.getBasicBlock(NextMBB));
3098 
3099   DAG.setRoot(BrAnd);
3100 }
3101 
3102 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
3103   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
3104 
3105   // Retrieve the successors, looking through artificial IR-level blocks such
3106   // as catchswitch.
3107   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
3108   const BasicBlock *EHPadBB = I.getSuccessor(1);
3109   MachineBasicBlock *EHPadMBB = FuncInfo.MBBMap[EHPadBB];
3110 
3111   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3112   // have to do anything here to lower funclet bundles.
3113   assert(!I.hasOperandBundlesOtherThan(
3114              {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3115               LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3116               LLVMContext::OB_cfguardtarget,
3117               LLVMContext::OB_clang_arc_attachedcall}) &&
3118          "Cannot lower invokes with arbitrary operand bundles yet!");
3119 
3120   const Value *Callee(I.getCalledOperand());
3121   const Function *Fn = dyn_cast<Function>(Callee);
3122   if (isa<InlineAsm>(Callee))
3123     visitInlineAsm(I, EHPadBB);
3124   else if (Fn && Fn->isIntrinsic()) {
3125     switch (Fn->getIntrinsicID()) {
3126     default:
3127       llvm_unreachable("Cannot invoke this intrinsic");
3128     case Intrinsic::donothing:
3129       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
3130     case Intrinsic::seh_try_begin:
3131     case Intrinsic::seh_scope_begin:
3132     case Intrinsic::seh_try_end:
3133     case Intrinsic::seh_scope_end:
3134       if (EHPadMBB)
3135           // This block is referenced by the EH table; mark its address as
3136           // taken so its destructor funclet is not removed by optimizations.
3137           EHPadMBB->setMachineBlockAddressTaken();
3138       break;
3139     case Intrinsic::experimental_patchpoint_void:
3140     case Intrinsic::experimental_patchpoint_i64:
3141       visitPatchpoint(I, EHPadBB);
3142       break;
3143     case Intrinsic::experimental_gc_statepoint:
3144       LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
3145       break;
3146     case Intrinsic::wasm_rethrow: {
3147       // This is usually done in visitTargetIntrinsic, but this intrinsic is
3148       // special because it can be invoked, so we manually lower it to a DAG
3149       // node here.
3150       SmallVector<SDValue, 8> Ops;
3151       Ops.push_back(getRoot()); // inchain
3152       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3153       Ops.push_back(
3154           DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
3155                                 TLI.getPointerTy(DAG.getDataLayout())));
3156       SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
3157       DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
3158       break;
3159     }
3160     }
3161   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
3162     // Currently we do not lower any intrinsic calls with deopt operand bundles.
3163     // Eventually we will support lowering the @llvm.experimental.deoptimize
3164     // intrinsic, and right now there are no plans to support other intrinsics
3165     // with deopt state.
3166     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
3167   } else {
3168     LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
3169   }
3170 
3171   // If the value of the invoke is used outside of its defining block, make it
3172   // available as a virtual register.
3173   // We already took care of the exported value for the statepoint instruction
3174   // during the call to LowerStatepoint.
3175   if (!isa<GCStatepointInst>(I)) {
3176     CopyToExportRegsIfNeeded(&I);
3177   }
3178 
3179   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3180   BranchProbabilityInfo *BPI = FuncInfo.BPI;
3181   BranchProbability EHPadBBProb =
3182       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3183           : BranchProbability::getZero();
3184   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
3185 
3186   // Update successor info.
3187   addSuccessorWithProb(InvokeMBB, Return);
3188   for (auto &UnwindDest : UnwindDests) {
3189     UnwindDest.first->setIsEHPad();
3190     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3191   }
3192   InvokeMBB->normalizeSuccProbs();
3193 
3194   // Drop into normal successor.
3195   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
3196                           DAG.getBasicBlock(Return)));
3197 }
3198 
3199 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
3200   MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
3201 
3202   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3203   // have to do anything here to lower funclet bundles.
3204   assert(!I.hasOperandBundlesOtherThan(
3205              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3206          "Cannot lower callbrs with arbitrary operand bundles yet!");
3207 
3208   assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
3209   visitInlineAsm(I);
3210   CopyToExportRegsIfNeeded(&I);
3211 
3212   // Retrieve successors.
3213   SmallPtrSet<BasicBlock *, 8> Dests;
3214   Dests.insert(I.getDefaultDest());
3215   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
3216 
3217   // Update successor info.
3218   addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
3219   for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
3220     BasicBlock *Dest = I.getIndirectDest(i);
3221     MachineBasicBlock *Target = FuncInfo.MBBMap[Dest];
3222     Target->setIsInlineAsmBrIndirectTarget();
3223     Target->setMachineBlockAddressTaken();
3224     Target->setLabelMustBeEmitted();
3225     // Don't add duplicate machine successors.
3226     if (Dests.insert(Dest).second)
3227       addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
3228   }
3229   CallBrMBB->normalizeSuccProbs();
3230 
3231   // Drop into default successor.
3232   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
3233                           MVT::Other, getControlRoot(),
3234                           DAG.getBasicBlock(Return)));
3235 }
3236 
3237 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
3238   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
3239 }
3240 
3241 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3242   assert(FuncInfo.MBB->isEHPad() &&
3243          "Call to landingpad not in landing pad!");
3244 
3245   // If there aren't registers to copy the values into (e.g., during SjLj
3246   // exceptions), then don't bother to create these DAG nodes.
3247   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3248   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3249   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3250       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3251     return;
3252 
3253   // If the landingpad's return type is a token type, we don't create DAG nodes
3254   // for its exception pointer and selector value. The extraction of exception
3255   // pointer or selector value from token type landingpads is not currently
3256   // supported.
3257   if (LP.getType()->isTokenTy())
3258     return;
3259 
3260   SmallVector<EVT, 2> ValueVTs;
3261   SDLoc dl = getCurSDLoc();
3262   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3263   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3264 
3265   // Get the two live-in registers as SDValues. The physregs have already been
3266   // copied into virtual registers.
3267   SDValue Ops[2];
3268   if (FuncInfo.ExceptionPointerVirtReg) {
3269     Ops[0] = DAG.getZExtOrTrunc(
3270         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3271                            FuncInfo.ExceptionPointerVirtReg,
3272                            TLI.getPointerTy(DAG.getDataLayout())),
3273         dl, ValueVTs[0]);
3274   } else {
3275     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3276   }
3277   Ops[1] = DAG.getZExtOrTrunc(
3278       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3279                          FuncInfo.ExceptionSelectorVirtReg,
3280                          TLI.getPointerTy(DAG.getDataLayout())),
3281       dl, ValueVTs[1]);
3282 
3283   // Merge into one.
3284   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3285                             DAG.getVTList(ValueVTs), Ops);
3286   setValue(&LP, Res);
3287 }
3288 
3289 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
3290                                            MachineBasicBlock *Last) {
3291   // Update JTCases.
3292   for (JumpTableBlock &JTB : SL->JTCases)
3293     if (JTB.first.HeaderBB == First)
3294       JTB.first.HeaderBB = Last;
3295 
3296   // Update BitTestCases.
3297   for (BitTestBlock &BTB : SL->BitTestCases)
3298     if (BTB.Parent == First)
3299       BTB.Parent = Last;
3300 }
3301 
3302 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3303   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3304 
3305   // Update machine-CFG edges with unique successors.
3306   SmallSet<BasicBlock*, 32> Done;
3307   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3308     BasicBlock *BB = I.getSuccessor(i);
3309     bool Inserted = Done.insert(BB).second;
3310     if (!Inserted)
3311         continue;
3312 
3313     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
3314     addSuccessorWithProb(IndirectBrMBB, Succ);
3315   }
3316   IndirectBrMBB->normalizeSuccProbs();
3317 
3318   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
3319                           MVT::Other, getControlRoot(),
3320                           getValue(I.getAddress())));
3321 }
3322 
3323 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3324   if (!DAG.getTarget().Options.TrapUnreachable)
3325     return;
3326 
3327   // We may be able to ignore unreachable behind a noreturn call.
3328   if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
3329     if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode())) {
3330       if (Call->doesNotReturn())
3331         return;
3332     }
3333   }
3334 
3335   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3336 }
3337 
3338 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3339   SDNodeFlags Flags;
3340   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3341     Flags.copyFMF(*FPOp);
3342 
3343   SDValue Op = getValue(I.getOperand(0));
3344   SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3345                                     Op, Flags);
3346   setValue(&I, UnNodeValue);
3347 }
3348 
3349 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3350   SDNodeFlags Flags;
3351   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3352     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3353     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3354   }
3355   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3356     Flags.setExact(ExactOp->isExact());
3357   if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
3358     Flags.setDisjoint(DisjointOp->isDisjoint());
3359   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3360     Flags.copyFMF(*FPOp);
3361 
3362   SDValue Op1 = getValue(I.getOperand(0));
3363   SDValue Op2 = getValue(I.getOperand(1));
3364   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3365                                      Op1, Op2, Flags);
3366   setValue(&I, BinNodeValue);
3367 }
3368 
3369 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3370   SDValue Op1 = getValue(I.getOperand(0));
3371   SDValue Op2 = getValue(I.getOperand(1));
3372 
3373   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3374       Op1.getValueType(), DAG.getDataLayout());
3375 
3376   // Coerce the shift amount to the right type if we can. This exposes the
3377   // truncate or zext to optimization early.
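       // E.g., an i8 value shifted by an i64 amount needs only
       // Log2_32_Ceil(8) = 3 bits of the amount, so truncating the amount to
       // the target's shift-amount type is safe.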
3378   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3379     assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) &&
3380            "Unexpected shift type");
3381     Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
3382   }
3383 
3384   bool nuw = false;
3385   bool nsw = false;
3386   bool exact = false;
3387 
3388   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3389 
3390     if (const OverflowingBinaryOperator *OFBinOp =
3391             dyn_cast<const OverflowingBinaryOperator>(&I)) {
3392       nuw = OFBinOp->hasNoUnsignedWrap();
3393       nsw = OFBinOp->hasNoSignedWrap();
3394     }
3395     if (const PossiblyExactOperator *ExactOp =
3396             dyn_cast<const PossiblyExactOperator>(&I))
3397       exact = ExactOp->isExact();
3398   }
3399   SDNodeFlags Flags;
3400   Flags.setExact(exact);
3401   Flags.setNoSignedWrap(nsw);
3402   Flags.setNoUnsignedWrap(nuw);
3403   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3404                             Flags);
3405   setValue(&I, Res);
3406 }
3407 
3408 void SelectionDAGBuilder::visitSDiv(const User &I) {
3409   SDValue Op1 = getValue(I.getOperand(0));
3410   SDValue Op2 = getValue(I.getOperand(1));
3411 
3412   SDNodeFlags Flags;
3413   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3414                  cast<PossiblyExactOperator>(&I)->isExact());
3415   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3416                            Op2, Flags));
3417 }
3418 
3419 void SelectionDAGBuilder::visitICmp(const User &I) {
3420   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3421   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3422     predicate = IC->getPredicate();
3423   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3424     predicate = ICmpInst::Predicate(IC->getPredicate());
3425   SDValue Op1 = getValue(I.getOperand(0));
3426   SDValue Op2 = getValue(I.getOperand(1));
3427   ISD::CondCode Opcode = getICmpCondCode(predicate);
3428 
3429   auto &TLI = DAG.getTargetLoweringInfo();
3430   EVT MemVT =
3431       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3432 
3433   // If a pointer's DAG type is larger than its memory type then the DAG values
3434   // are zero-extended. This breaks signed comparisons so truncate back to the
3435   // underlying type before doing the compare.
3436   if (Op1.getValueType() != MemVT) {
3437     Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3438     Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3439   }
3440 
3441   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3442                                                         I.getType());
3443   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3444 }
3445 
3446 void SelectionDAGBuilder::visitFCmp(const User &I) {
3447   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3448   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3449     predicate = FC->getPredicate();
3450   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3451     predicate = FCmpInst::Predicate(FC->getPredicate());
3452   SDValue Op1 = getValue(I.getOperand(0));
3453   SDValue Op2 = getValue(I.getOperand(1));
3454 
3455   ISD::CondCode Condition = getFCmpCondCode(predicate);
3456   auto *FPMO = cast<FPMathOperator>(&I);
3457   if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3458     Condition = getFCmpCodeWithoutNaN(Condition);
3459 
3460   SDNodeFlags Flags;
3461   Flags.copyFMF(*FPMO);
3462   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3463 
3464   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3465                                                         I.getType());
3466   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3467 }
3468 
3469 // Check that the condition of the select is used only by select instructions,
3470 // so that forming a min/max does not leave the compare with other consumers.
3471 static bool hasOnlySelectUsers(const Value *Cond) {
3472   return llvm::all_of(Cond->users(), [](const Value *V) {
3473     return isa<SelectInst>(V);
3474   });
3475 }
3476 
3477 void SelectionDAGBuilder::visitSelect(const User &I) {
3478   SmallVector<EVT, 4> ValueVTs;
3479   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3480                   ValueVTs);
3481   unsigned NumValues = ValueVTs.size();
3482   if (NumValues == 0) return;
3483 
3484   SmallVector<SDValue, 4> Values(NumValues);
3485   SDValue Cond     = getValue(I.getOperand(0));
3486   SDValue LHSVal   = getValue(I.getOperand(1));
3487   SDValue RHSVal   = getValue(I.getOperand(2));
3488   SmallVector<SDValue, 1> BaseOps(1, Cond);
3489   ISD::NodeType OpCode =
3490       Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3491 
3492   bool IsUnaryAbs = false;
3493   bool Negate = false;
3494 
3495   SDNodeFlags Flags;
3496   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3497     Flags.copyFMF(*FPOp);
3498 
3499   Flags.setUnpredictable(
3500       cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));
3501 
3502   // Min/max matching is only viable if all output VTs are the same.
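       // E.g., select (icmp sgt %x, %y), %x, %y matches SPF_SMAX below and,
       // when ISD::SMAX is legal for the type, lowers to a single smax node.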
3503   if (all_equal(ValueVTs)) {
3504     EVT VT = ValueVTs[0];
3505     LLVMContext &Ctx = *DAG.getContext();
3506     auto &TLI = DAG.getTargetLoweringInfo();
3507 
3508     // We care about the legality of the operation after it has been type
3509     // legalized.
3510     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3511       VT = TLI.getTypeToTransformTo(Ctx, VT);
3512 
3513     // If the vselect is legal, assume we want to leave this as a vector setcc +
3514     // vselect. Otherwise, if this is going to be scalarized, we want to see if
3515     // min/max is legal on the scalar type.
3516     bool UseScalarMinMax = VT.isVector() &&
3517       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3518 
3519     // ValueTracking's select pattern matching does not account for -0.0,
3520     // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that
3521     // -0.0 is less than +0.0.
3522     Value *LHS, *RHS;
3523     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3524     ISD::NodeType Opc = ISD::DELETED_NODE;
3525     switch (SPR.Flavor) {
3526     case SPF_UMAX:    Opc = ISD::UMAX; break;
3527     case SPF_UMIN:    Opc = ISD::UMIN; break;
3528     case SPF_SMAX:    Opc = ISD::SMAX; break;
3529     case SPF_SMIN:    Opc = ISD::SMIN; break;
3530     case SPF_FMINNUM:
3531       switch (SPR.NaNBehavior) {
3532       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3533       case SPNB_RETURNS_NAN: break;
3534       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3535       case SPNB_RETURNS_ANY:
3536         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT) ||
3537             (UseScalarMinMax &&
3538              TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType())))
3539           Opc = ISD::FMINNUM;
3540         break;
3541       }
3542       break;
3543     case SPF_FMAXNUM:
3544       switch (SPR.NaNBehavior) {
3545       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3546       case SPNB_RETURNS_NAN: break;
3547       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3548       case SPNB_RETURNS_ANY:
3549         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT) ||
3550             (UseScalarMinMax &&
3551              TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType())))
3552           Opc = ISD::FMAXNUM;
3553         break;
3554       }
3555       break;
3556     case SPF_NABS:
3557       Negate = true;
3558       [[fallthrough]];
3559     case SPF_ABS:
3560       IsUnaryAbs = true;
3561       Opc = ISD::ABS;
3562       break;
3563     default: break;
3564     }
3565 
3566     if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3567         (TLI.isOperationLegalOrCustomOrPromote(Opc, VT) ||
3568          (UseScalarMinMax &&
3569           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3570         // If the underlying comparison instruction is used by any other
3571         // instruction, the consumed instructions won't be destroyed, so it is
3572         // not profitable to convert to a min/max.
3573         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3574       OpCode = Opc;
3575       LHSVal = getValue(LHS);
3576       RHSVal = getValue(RHS);
3577       BaseOps.clear();
3578     }
3579 
3580     if (IsUnaryAbs) {
3581       OpCode = Opc;
3582       LHSVal = getValue(LHS);
3583       BaseOps.clear();
3584     }
3585   }
3586 
3587   if (IsUnaryAbs) {
3588     for (unsigned i = 0; i != NumValues; ++i) {
3589       SDLoc dl = getCurSDLoc();
3590       EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3591       Values[i] =
3592           DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3593       if (Negate)
3594         Values[i] = DAG.getNegative(Values[i], dl, VT);
3595     }
3596   } else {
3597     for (unsigned i = 0; i != NumValues; ++i) {
3598       SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3599       Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3600       Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3601       Values[i] = DAG.getNode(
3602           OpCode, getCurSDLoc(),
3603           LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3604     }
3605   }
3606 
3607   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3608                            DAG.getVTList(ValueVTs), Values));
3609 }
3610 
3611 void SelectionDAGBuilder::visitTrunc(const User &I) {
3612   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3613   SDValue N = getValue(I.getOperand(0));
3614   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3615                                                         I.getType());
3616   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3617 }
3618 
3619 void SelectionDAGBuilder::visitZExt(const User &I) {
3620   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3621   // ZExt also can't be a cast to bool for the same reason; nothing much to do.
3622   SDValue N = getValue(I.getOperand(0));
3623   auto &TLI = DAG.getTargetLoweringInfo();
3624   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3625 
3626   SDNodeFlags Flags;
3627   if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3628     Flags.setNonNeg(PNI->hasNonNeg());
3629 
3630   // Eagerly use nonneg information to canonicalize towards sign_extend if
3631   // that is the target's preference.
3632   // TODO: Let the target do this later.
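       // E.g., on targets where sign extension is cheaper (such as RISC-V for
       // i32 -> i64), 'zext nneg i32 %x to i64' is emitted as sign_extend here.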
3633   if (Flags.hasNonNeg() &&
3634       TLI.isSExtCheaperThanZExt(N.getValueType(), DestVT)) {
3635     setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3636     return;
3637   }
3638 
3639   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N, Flags));
3640 }
3641 
3642 void SelectionDAGBuilder::visitSExt(const User &I) {
3643   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3644   // SExt also can't be a cast to bool for the same reason; nothing much to do.
3645   SDValue N = getValue(I.getOperand(0));
3646   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3647                                                         I.getType());
3648   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3649 }
3650 
3651 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3652   // FPTrunc is never a no-op cast, no need to check
3653   SDValue N = getValue(I.getOperand(0));
3654   SDLoc dl = getCurSDLoc();
3655   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3656   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3657   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3658                            DAG.getTargetConstant(
3659                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3660 }
3661 
3662 void SelectionDAGBuilder::visitFPExt(const User &I) {
3663   // FPExt is never a no-op cast, no need to check
3664   SDValue N = getValue(I.getOperand(0));
3665   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3666                                                         I.getType());
3667   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3668 }
3669 
3670 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3671   // FPToUI is never a no-op cast, no need to check
3672   SDValue N = getValue(I.getOperand(0));
3673   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3674                                                         I.getType());
3675   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3676 }
3677 
3678 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3679   // FPToSI is never a no-op cast, no need to check
3680   SDValue N = getValue(I.getOperand(0));
3681   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3682                                                         I.getType());
3683   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3684 }
3685 
3686 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3687   // UIToFP is never a no-op cast, no need to check
3688   SDValue N = getValue(I.getOperand(0));
3689   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3690                                                         I.getType());
3691   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3692 }
3693 
3694 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3695   // SIToFP is never a no-op cast, no need to check
3696   SDValue N = getValue(I.getOperand(0));
3697   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3698                                                         I.getType());
3699   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3700 }
3701 
3702 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3703   // What to do depends on the size of the integer and the size of the pointer.
3704   // We can either truncate, zero extend, or no-op, accordingly.
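       // E.g., ptrtoint of a 64-bit pointer to i32 truncates, to i128 it
       // zero-extends, and an equal-width cast is a no-op.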
3705   SDValue N = getValue(I.getOperand(0));
3706   auto &TLI = DAG.getTargetLoweringInfo();
3707   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3708                                                         I.getType());
3709   EVT PtrMemVT =
3710       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3711   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3712   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3713   setValue(&I, N);
3714 }
3715 
3716 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3717   // What to do depends on the size of the integer and the size of the pointer.
3718   // We can either truncate, zero extend, or no-op, accordingly.
3719   SDValue N = getValue(I.getOperand(0));
3720   auto &TLI = DAG.getTargetLoweringInfo();
3721   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3722   EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3723   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3724   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3725   setValue(&I, N);
3726 }
3727 
3728 void SelectionDAGBuilder::visitBitCast(const User &I) {
3729   SDValue N = getValue(I.getOperand(0));
3730   SDLoc dl = getCurSDLoc();
3731   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3732                                                         I.getType());
3733 
3734   // BitCast assures us that source and destination are the same size so this is
3735   // either a BITCAST or a no-op.
3736   if (DestVT != N.getValueType())
3737     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3738                              DestVT, N)); // convert types.
3739   // Check if the original LLVM IR operand was a ConstantInt, because getValue()
3740   // might fold any kind of constant expression to an integer constant and that
3741   // is not what we are looking for. Only recognize a bitcast of a genuine
3742   // constant integer as an opaque constant.
3743   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3744     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3745                                  /*isOpaque*/true));
3746   else
3747     setValue(&I, N);            // noop cast.
3748 }
3749 
3750 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3751   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3752   const Value *SV = I.getOperand(0);
3753   SDValue N = getValue(SV);
3754   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3755 
3756   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3757   unsigned DestAS = I.getType()->getPointerAddressSpace();
3758 
3759   if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
3760     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3761 
3762   setValue(&I, N);
3763 }
3764 
3765 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3766   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3767   SDValue InVec = getValue(I.getOperand(0));
3768   SDValue InVal = getValue(I.getOperand(1));
3769   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3770                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3771   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3772                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3773                            InVec, InVal, InIdx));
3774 }
3775 
3776 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3777   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3778   SDValue InVec = getValue(I.getOperand(0));
3779   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3780                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3781   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3782                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3783                            InVec, InIdx));
3784 }
3785 
3786 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3787   SDValue Src1 = getValue(I.getOperand(0));
3788   SDValue Src2 = getValue(I.getOperand(1));
3789   ArrayRef<int> Mask;
3790   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
3791     Mask = SVI->getShuffleMask();
3792   else
3793     Mask = cast<ConstantExpr>(I).getShuffleMask();
3794   SDLoc DL = getCurSDLoc();
3795   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3796   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3797   EVT SrcVT = Src1.getValueType();
3798 
3799   if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
3800       VT.isScalableVector()) {
3801     // Canonical splat form of first element of first input vector.
3802     SDValue FirstElt =
3803         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
3804                     DAG.getVectorIdxConstant(0, DL));
3805     setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
3806     return;
3807   }
3808 
3809   // For now, we only handle splats for scalable vectors.
3810   // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
3811   // for targets that support a SPLAT_VECTOR for non-scalable vector types.
3812   assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
3813 
3814   unsigned SrcNumElts = SrcVT.getVectorNumElements();
3815   unsigned MaskNumElts = Mask.size();
3816 
3817   if (SrcNumElts == MaskNumElts) {
3818     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3819     return;
3820   }
3821 
3822   // Normalize the shuffle vector since mask and vector length don't match.
3823     // The mask is longer than the source vectors. We can use CONCAT_VECTORS
3824     // to make the vector and mask lengths match.
3825     // make the mask and vectors lengths match.
3826 
3827     if (MaskNumElts % SrcNumElts == 0) {
3828       // Mask length is a multiple of the source vector length.
3829       // Check if the shuffle is some kind of concatenation of the input
3830       // vectors.
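      // e.g. with two <2 x i32> sources, the mask <0,1,2,3> concatenates
      // Src1 and Src2, while <0,1,0,1> concatenates Src1 with itself.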
3831       unsigned NumConcat = MaskNumElts / SrcNumElts;
3832       bool IsConcat = true;
3833       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3834       for (unsigned i = 0; i != MaskNumElts; ++i) {
3835         int Idx = Mask[i];
3836         if (Idx < 0)
3837           continue;
3838         // Ensure the indices in each SrcVT sized piece are sequential and that
3839         // the same source is used for the whole piece.
3840         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3841             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3842              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3843           IsConcat = false;
3844           break;
3845         }
3846         // Remember which source this index came from.
3847         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3848       }
3849 
3850       // The shuffle is concatenating multiple vectors together. Just emit
3851       // a CONCAT_VECTORS operation.
3852       if (IsConcat) {
3853         SmallVector<SDValue, 8> ConcatOps;
3854         for (auto Src : ConcatSrcs) {
3855           if (Src < 0)
3856             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3857           else if (Src == 0)
3858             ConcatOps.push_back(Src1);
3859           else
3860             ConcatOps.push_back(Src2);
3861         }
3862         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3863         return;
3864       }
3865     }
3866 
3867     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3868     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3869     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3870                                     PaddedMaskNumElts);
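    // e.g. four-element sources with a six-element mask are padded to
    // PaddedMaskNumElts = 8 (NumConcat = 2); the two extra lanes are
    // trimmed off again below.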
3871 
3872     // Pad both vectors with undefs to make them the same length as the mask.
3873     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3874 
3875     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3876     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3877     MOps1[0] = Src1;
3878     MOps2[0] = Src2;
3879 
3880     Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3881     Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3882 
3883     // Readjust mask for new input vector length.
3884     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3885     for (unsigned i = 0; i != MaskNumElts; ++i) {
3886       int Idx = Mask[i];
3887       if (Idx >= (int)SrcNumElts)
3888         Idx -= SrcNumElts - PaddedMaskNumElts;
3889       MappedOps[i] = Idx;
3890     }
3891 
3892     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3893 
3894     // If the concatenated vector was padded, extract a subvector with the
3895     // correct number of elements.
3896     if (MaskNumElts != PaddedMaskNumElts)
3897       Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3898                            DAG.getVectorIdxConstant(0, DL));
3899 
3900     setValue(&I, Result);
3901     return;
3902   }
3903 
3904   if (SrcNumElts > MaskNumElts) {
3905     // Analyze the access pattern of the vector to see if we can extract
3906     // two subvectors and do the shuffle.
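    // e.g. with <8 x i32> sources and the four-element mask <4,5,6,7>, we
    // can extract elements 4..7 of Src1 and remap the mask to <0,1,2,3>.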
3907     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3908     bool CanExtract = true;
3909     for (int Idx : Mask) {
3910       unsigned Input = 0;
3911       if (Idx < 0)
3912         continue;
3913 
3914       if (Idx >= (int)SrcNumElts) {
3915         Input = 1;
3916         Idx -= SrcNumElts;
3917       }
3918 
3919       // If all the indices come from the same MaskNumElts-sized portion of
3920       // the sources, we can use extract. Also make sure the extract wouldn't
3921       // extract past the end of the source.
3922       int NewStartIdx = alignDown(Idx, MaskNumElts);
3923       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3924           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3925         CanExtract = false;
3926       // Make sure we always update StartIdx as we use it to track if all
3927       // elements are undef.
3928       StartIdx[Input] = NewStartIdx;
3929     }
3930 
3931     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3932       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3933       return;
3934     }
3935     if (CanExtract) {
3936       // Extract appropriate subvector and generate a vector shuffle
3937       for (unsigned Input = 0; Input < 2; ++Input) {
3938         SDValue &Src = Input == 0 ? Src1 : Src2;
3939         if (StartIdx[Input] < 0)
3940           Src = DAG.getUNDEF(VT);
3941         else {
3942           Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3943                             DAG.getVectorIdxConstant(StartIdx[Input], DL));
3944         }
3945       }
3946 
3947       // Calculate new mask.
3948       SmallVector<int, 8> MappedOps(Mask);
3949       for (int &Idx : MappedOps) {
3950         if (Idx >= (int)SrcNumElts)
3951           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3952         else if (Idx >= 0)
3953           Idx -= StartIdx[0];
3954       }
3955 
3956       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3957       return;
3958     }
3959   }
3960 
3961   // We can't use either concat vectors or extract subvectors, so fall back
3962   // to replacing the shuffle with per-element EXTRACT_VECTOR_ELTs feeding a
3963   // BUILD_VECTOR.
3964   EVT EltVT = VT.getVectorElementType();
3965   SmallVector<SDValue,8> Ops;
3966   for (int Idx : Mask) {
3967     SDValue Res;
3968 
3969     if (Idx < 0) {
3970       Res = DAG.getUNDEF(EltVT);
3971     } else {
3972       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3973       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3974 
3975       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
3976                         DAG.getVectorIdxConstant(Idx, DL));
3977     }
3978 
3979     Ops.push_back(Res);
3980   }
3981 
3982   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3983 }
3984 
3985 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
3986   ArrayRef<unsigned> Indices = I.getIndices();
3987   const Value *Op0 = I.getOperand(0);
3988   const Value *Op1 = I.getOperand(1);
3989   Type *AggTy = I.getType();
3990   Type *ValTy = Op1->getType();
3991   bool IntoUndef = isa<UndefValue>(Op0);
3992   bool FromUndef = isa<UndefValue>(Op1);
3993 
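  // ComputeLinearIndex flattens the nested aggregate indices into a position
  // among the aggregate's leaf values; e.g. in {i32, {float, i64}}, indices
  // {1, 1} select the i64 at linear index 2.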
3994   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3995 
3996   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3997   SmallVector<EVT, 4> AggValueVTs;
3998   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3999   SmallVector<EVT, 4> ValValueVTs;
4000   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4001 
4002   unsigned NumAggValues = AggValueVTs.size();
4003   unsigned NumValValues = ValValueVTs.size();
4004   SmallVector<SDValue, 4> Values(NumAggValues);
4005 
4006   // Ignore an insertvalue that produces an empty object
4007   if (!NumAggValues) {
4008     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4009     return;
4010   }
4011 
4012   SDValue Agg = getValue(Op0);
4013   unsigned i = 0;
4014   // Copy the beginning value(s) from the original aggregate.
4015   for (; i != LinearIndex; ++i)
4016     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4017                 SDValue(Agg.getNode(), Agg.getResNo() + i);
4018   // Copy values from the inserted value(s).
4019   if (NumValValues) {
4020     SDValue Val = getValue(Op1);
4021     for (; i != LinearIndex + NumValValues; ++i)
4022       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4023                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
4024   }
4025   // Copy remaining value(s) from the original aggregate.
4026   for (; i != NumAggValues; ++i)
4027     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4028                 SDValue(Agg.getNode(), Agg.getResNo() + i);
4029 
4030   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4031                            DAG.getVTList(AggValueVTs), Values));
4032 }
4033 
4034 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
4035   ArrayRef<unsigned> Indices = I.getIndices();
4036   const Value *Op0 = I.getOperand(0);
4037   Type *AggTy = Op0->getType();
4038   Type *ValTy = I.getType();
4039   bool OutOfUndef = isa<UndefValue>(Op0);
4040 
4041   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4042 
4043   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4044   SmallVector<EVT, 4> ValValueVTs;
4045   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4046 
4047   unsigned NumValValues = ValValueVTs.size();
4048 
4049   // Ignore an extractvalue that produces an empty object
4050   if (!NumValValues) {
4051     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4052     return;
4053   }
4054 
4055   SmallVector<SDValue, 4> Values(NumValValues);
4056 
4057   SDValue Agg = getValue(Op0);
4058   // Copy out the selected value(s).
4059   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4060     Values[i - LinearIndex] =
4061       OutOfUndef ?
4062         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
4063         SDValue(Agg.getNode(), Agg.getResNo() + i);
4064 
4065   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4066                            DAG.getVTList(ValValueVTs), Values));
4067 }
4068 
4069 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
4070   Value *Op0 = I.getOperand(0);
4071   // Note that the pointer operand may be a vector of pointers. Take the scalar
4072   // element which holds a pointer.
4073   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
4074   SDValue N = getValue(Op0);
4075   SDLoc dl = getCurSDLoc();
4076   auto &TLI = DAG.getTargetLoweringInfo();
4077 
4078   // Normalize Vector GEP - all scalar operands should be converted to the
4079   // splat vector.
4080   bool IsVectorGEP = I.getType()->isVectorTy();
4081   ElementCount VectorElementCount =
4082       IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
4083                   : ElementCount::getFixed(0);
4084 
4085   if (IsVectorGEP && !N.getValueType().isVector()) {
4086     LLVMContext &Context = *DAG.getContext();
4087     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
4088     N = DAG.getSplat(VT, dl, N);
4089   }
4090 
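  // For example, on a 64-bit target, "getelementptr {i32, i32}, ptr %p,
  // i64 %i, i32 1" lowers to roughly %p + %i * 8 + 4: the constant struct
  // field offset becomes an ADD, and the array index becomes a MUL (or SHL)
  // followed by an ADD in the loop below.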
4091   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
4092        GTI != E; ++GTI) {
4093     const Value *Idx = GTI.getOperand();
4094     if (StructType *StTy = GTI.getStructTypeOrNull()) {
4095       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
4096       if (Field) {
4097         // N = N + Offset
4098         uint64_t Offset =
4099             DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
4100 
4101         // In an inbounds GEP with an offset that is nonnegative even when
4102         // interpreted as signed, assume there is no unsigned overflow.
4103         SDNodeFlags Flags;
4104         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
4105           Flags.setNoUnsignedWrap(true);
4106 
4107         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
4108                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
4109       }
4110     } else {
4111       // IdxSize is the width of the arithmetic according to IR semantics.
4112       // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4113       // (and fix up the result later).
4114       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4115       MVT IdxTy = MVT::getIntegerVT(IdxSize);
4116       TypeSize ElementSize =
4117           GTI.getSequentialElementStride(DAG.getDataLayout());
4118       // We intentionally mask away the high bits here; ElementSize may not
4119       // fit in IdxTy.
4120       APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
4121       bool ElementScalable = ElementSize.isScalable();
4122 
4123       // If this is a scalar constant or a splat vector of constants,
4124       // handle it quickly.
4125       const auto *C = dyn_cast<Constant>(Idx);
4126       if (C && isa<VectorType>(C->getType()))
4127         C = C->getSplatValue();
4128 
4129       const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4130       if (CI && CI->isZero())
4131         continue;
4132       if (CI && !ElementScalable) {
4133         APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4134         LLVMContext &Context = *DAG.getContext();
4135         SDValue OffsVal;
4136         if (IsVectorGEP)
4137           OffsVal = DAG.getConstant(
4138               Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4139         else
4140           OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4141 
4142         // In an inbounds GEP with an offset that is nonnegative even when
4143         // interpreted as signed, assume there is no unsigned overflow.
4144         SDNodeFlags Flags;
4145         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
4146           Flags.setNoUnsignedWrap(true);
4147 
4148         OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4149 
4150         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
4151         continue;
4152       }
4153 
4154       // N = N + Idx * ElementMul;
4155       SDValue IdxN = getValue(Idx);
4156 
4157       if (!IdxN.getValueType().isVector() && IsVectorGEP) {
4158         EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(),
4159                                   VectorElementCount);
4160         IdxN = DAG.getSplat(VT, dl, IdxN);
4161       }
4162 
4163       // If the index is smaller or larger than intptr_t, truncate or extend
4164       // it.
4165       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4166 
4167       if (ElementScalable) {
4168         EVT VScaleTy = N.getValueType().getScalarType();
4169         SDValue VScale = DAG.getNode(
4170             ISD::VSCALE, dl, VScaleTy,
4171             DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4172         if (IsVectorGEP)
4173           VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4174         IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
4175       } else {
4176         // If this is a multiply by a power of two, turn it into a shl
4177         // immediately.  This is a very common case.
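        // e.g. indexing an array of i32: IdxN * 4 becomes IdxN << 2.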
4178         if (ElementMul != 1) {
4179           if (ElementMul.isPowerOf2()) {
4180             unsigned Amt = ElementMul.logBase2();
4181             IdxN = DAG.getNode(ISD::SHL, dl,
4182                                N.getValueType(), IdxN,
4183                                DAG.getConstant(Amt, dl, IdxN.getValueType()));
4184           } else {
4185             SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
4186                                             IdxN.getValueType());
4187             IdxN = DAG.getNode(ISD::MUL, dl,
4188                                N.getValueType(), IdxN, Scale);
4189           }
4190         }
4191       }
4192 
4193       N = DAG.getNode(ISD::ADD, dl,
4194                       N.getValueType(), N, IdxN);
4195     }
4196   }
4197 
4198   MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
4199   MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
4200   if (IsVectorGEP) {
4201     PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
4202     PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
4203   }
4204 
4205   if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4206     N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4207 
4208   setValue(&I, N);
4209 }
4210 
4211 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4212   // If this is a fixed sized alloca in the entry block of the function,
4213   // allocate it statically on the stack.
4214   if (FuncInfo.StaticAllocaMap.count(&I))
4215     return;   // getValue will auto-populate this.
4216 
4217   SDLoc dl = getCurSDLoc();
4218   Type *Ty = I.getAllocatedType();
4219   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4220   auto &DL = DAG.getDataLayout();
4221   TypeSize TySize = DL.getTypeAllocSize(Ty);
4222   MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
4223 
4224   SDValue AllocSize = getValue(I.getArraySize());
4225 
4226   EVT IntPtr = TLI.getPointerTy(DL, I.getAddressSpace());
4227   if (AllocSize.getValueType() != IntPtr)
4228     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4229 
4230   if (TySize.isScalable())
4231     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4232                             DAG.getVScale(dl, IntPtr,
4233                                           APInt(IntPtr.getScalarSizeInBits(),
4234                                                 TySize.getKnownMinValue())));
4235   else {
4236     SDValue TySizeValue =
4237         DAG.getConstant(TySize.getFixedValue(), dl, MVT::getIntegerVT(64));
4238     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4239                             DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4240   }
4241 
4242   // Handle alignment.  If the requested alignment is less than or equal to
4243   // the stack alignment, ignore it.  If the size is greater than or equal to
4244   // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
4245   Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4246   if (*Alignment <= StackAlign)
4247     Alignment = std::nullopt;
4248 
4249   const uint64_t StackAlignMask = StackAlign.value() - 1U;
4250   // Round the size of the allocation up to the stack alignment size
4251   // by adding SA-1 to the size. This doesn't overflow because we're computing
4252   // an address inside an alloca.
4253   SDNodeFlags Flags;
4254   Flags.setNoUnsignedWrap(true);
4255   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4256                           DAG.getConstant(StackAlignMask, dl, IntPtr), Flags);
4257 
4258   // Mask out the low bits for alignment purposes.
4259   AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4260                           DAG.getConstant(~StackAlignMask, dl, IntPtr));
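  // Together, the ADD above and the AND here compute
  // alignTo(AllocSize, StackAlign); e.g. with a 16-byte stack alignment, a
  // 20-byte request becomes (20 + 15) & ~15 == 32.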
4261 
4262   SDValue Ops[] = {
4263       getRoot(), AllocSize,
4264       DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4265   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4266   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4267   setValue(&I, DSA);
4268   DAG.setRoot(DSA.getValue(1));
4269 
4270   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4271 }
4272 
4273 static const MDNode *getRangeMetadata(const Instruction &I) {
4274   // If !noundef is not present, then !range violation results in a poison
4275   // value rather than immediate undefined behavior. In theory, transferring
4276   // these annotations to SDAG is fine, but in practice there are key SDAG
4277   // transforms that are known not to be poison-safe, such as folding logical
4278   // and/or to bitwise and/or. For now, only transfer !range if !noundef is
4279   // also present.
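  // For example, "%v = load i32, ptr %p, !range !R, !noundef !{}" keeps its
  // range information here, while the same load without !noundef does not.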
4280   if (!I.hasMetadata(LLVMContext::MD_noundef))
4281     return nullptr;
4282   return I.getMetadata(LLVMContext::MD_range);
4283 }
4284 
4285 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4286   if (I.isAtomic())
4287     return visitAtomicLoad(I);
4288 
4289   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4290   const Value *SV = I.getOperand(0);
4291   if (TLI.supportSwiftError()) {
4292     // Swifterror values can come from either a function parameter with
4293     // swifterror attribute or an alloca with swifterror attribute.
4294     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4295       if (Arg->hasSwiftErrorAttr())
4296         return visitLoadFromSwiftError(I);
4297     }
4298 
4299     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4300       if (Alloca->isSwiftError())
4301         return visitLoadFromSwiftError(I);
4302     }
4303   }
4304 
4305   SDValue Ptr = getValue(SV);
4306 
4307   Type *Ty = I.getType();
4308   SmallVector<EVT, 4> ValueVTs, MemVTs;
4309   SmallVector<TypeSize, 4> Offsets;
4310   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets, 0);
4311   unsigned NumValues = ValueVTs.size();
4312   if (NumValues == 0)
4313     return;
4314 
4315   Align Alignment = I.getAlign();
4316   AAMDNodes AAInfo = I.getAAMetadata();
4317   const MDNode *Ranges = getRangeMetadata(I);
4318   bool isVolatile = I.isVolatile();
4319   MachineMemOperand::Flags MMOFlags =
4320       TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4321 
4322   SDValue Root;
4323   bool ConstantMemory = false;
4324   if (isVolatile)
4325     // Serialize volatile loads with other side effects.
4326     Root = getRoot();
4327   else if (NumValues > MaxParallelChains)
4328     Root = getMemoryRoot();
4329   else if (AA &&
4330            AA->pointsToConstantMemory(MemoryLocation(
4331                SV,
4332                LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4333                AAInfo))) {
4334     // Do not serialize (non-volatile) loads of constant memory with anything.
4335     Root = DAG.getEntryNode();
4336     ConstantMemory = true;
4337     MMOFlags |= MachineMemOperand::MOInvariant;
4338   } else {
4339     // Do not serialize non-volatile loads against each other.
4340     Root = DAG.getRoot();
4341   }
4342 
4343   SDLoc dl = getCurSDLoc();
4344 
4345   if (isVolatile)
4346     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4347 
4348   SmallVector<SDValue, 4> Values(NumValues);
4349   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4350 
4351   unsigned ChainI = 0;
4352   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4353     // Serializing loads here may result in excessive register pressure, and
4354     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4355     // could recover a bit by hoisting nodes upward in the chain by recognizing
4356     // they are side-effect free or do not alias. The optimizer should really
4357     // avoid this case by converting large object/array copies to llvm.memcpy
4358     // (MaxParallelChains should always remain as failsafe).
4359     if (ChainI == MaxParallelChains) {
4360       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4361       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4362                                   ArrayRef(Chains.data(), ChainI));
4363       Root = Chain;
4364       ChainI = 0;
4365     }
4366 
4367     // TODO: MachinePointerInfo only supports a fixed length offset.
4368     MachinePointerInfo PtrInfo =
4369         !Offsets[i].isScalable() || Offsets[i].isZero()
4370             ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4371             : MachinePointerInfo();
4372 
4373     SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4374     SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment,
4375                             MMOFlags, AAInfo, Ranges);
4376     Chains[ChainI] = L.getValue(1);
4377 
4378     if (MemVTs[i] != ValueVTs[i])
4379       L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4380 
4381     Values[i] = L;
4382   }
4383 
4384   if (!ConstantMemory) {
4385     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4386                                 ArrayRef(Chains.data(), ChainI));
4387     if (isVolatile)
4388       DAG.setRoot(Chain);
4389     else
4390       PendingLoads.push_back(Chain);
4391   }
4392 
4393   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4394                            DAG.getVTList(ValueVTs), Values));
4395 }
4396 
4397 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4398   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4399          "call visitStoreToSwiftError when backend supports swifterror");
4400 
4401   SmallVector<EVT, 4> ValueVTs;
4402   SmallVector<uint64_t, 4> Offsets;
4403   const Value *SrcV = I.getOperand(0);
4404   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4405                   SrcV->getType(), ValueVTs, &Offsets, 0);
4406   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4407          "expect a single EVT for swifterror");
4408 
4409   SDValue Src = getValue(SrcV);
4410   // Create a virtual register, then update the virtual register.
4411   Register VReg =
4412       SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4413   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4414   // Chain can be getRoot or getControlRoot.
4415   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4416                                       SDValue(Src.getNode(), Src.getResNo()));
4417   DAG.setRoot(CopyNode);
4418 }
4419 
4420 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4421   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4422          "call visitLoadFromSwiftError when backend supports swifterror");
4423 
4424   assert(!I.isVolatile() &&
4425          !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4426          !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4427          "No support for volatile, non-temporal, or invariant load_from_swift_error");
4428 
4429   const Value *SV = I.getOperand(0);
4430   Type *Ty = I.getType();
4431   assert(
4432       (!AA ||
4433        !AA->pointsToConstantMemory(MemoryLocation(
4434            SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4435            I.getAAMetadata()))) &&
4436       "load_from_swift_error should not be constant memory");
4437 
4438   SmallVector<EVT, 4> ValueVTs;
4439   SmallVector<uint64_t, 4> Offsets;
4440   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4441                   ValueVTs, &Offsets, 0);
4442   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4443          "expect a single EVT for swifterror");
4444 
4445   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4446   SDValue L = DAG.getCopyFromReg(
4447       getRoot(), getCurSDLoc(),
4448       SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4449 
4450   setValue(&I, L);
4451 }
4452 
4453 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4454   if (I.isAtomic())
4455     return visitAtomicStore(I);
4456 
4457   const Value *SrcV = I.getOperand(0);
4458   const Value *PtrV = I.getOperand(1);
4459 
4460   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4461   if (TLI.supportSwiftError()) {
4462     // Swifterror values can come from either a function parameter with
4463     // swifterror attribute or an alloca with swifterror attribute.
4464     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4465       if (Arg->hasSwiftErrorAttr())
4466         return visitStoreToSwiftError(I);
4467     }
4468 
4469     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4470       if (Alloca->isSwiftError())
4471         return visitStoreToSwiftError(I);
4472     }
4473   }
4474 
4475   SmallVector<EVT, 4> ValueVTs, MemVTs;
4476   SmallVector<TypeSize, 4> Offsets;
4477   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4478                   SrcV->getType(), ValueVTs, &MemVTs, &Offsets, 0);
4479   unsigned NumValues = ValueVTs.size();
4480   if (NumValues == 0)
4481     return;
4482 
4483   // Get the lowered operands. Note that we do this after
4484   // checking if NumResults is zero, because with zero results
4485   // the operands won't have values in the map.
4486   SDValue Src = getValue(SrcV);
4487   SDValue Ptr = getValue(PtrV);
4488 
4489   SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4490   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4491   SDLoc dl = getCurSDLoc();
4492   Align Alignment = I.getAlign();
4493   AAMDNodes AAInfo = I.getAAMetadata();
4494 
4495   auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4496 
4497   unsigned ChainI = 0;
4498   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4499     // See visitLoad comments.
4500     if (ChainI == MaxParallelChains) {
4501       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4502                                   ArrayRef(Chains.data(), ChainI));
4503       Root = Chain;
4504       ChainI = 0;
4505     }
4506 
4507     // TODO: MachinePointerInfo only supports a fixed length offset.
4508     MachinePointerInfo PtrInfo =
4509         !Offsets[i].isScalable() || Offsets[i].isZero()
4510             ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4511             : MachinePointerInfo();
4512 
4513     SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4514     SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4515     if (MemVTs[i] != ValueVTs[i])
4516       Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4517     SDValue St =
4518         DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4519     Chains[ChainI] = St;
4520   }
4521 
4522   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4523                                   ArrayRef(Chains.data(), ChainI));
4524   setValue(&I, StoreNode);
4525   DAG.setRoot(StoreNode);
4526 }
4527 
4528 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4529                                            bool IsCompressing) {
4530   SDLoc sdl = getCurSDLoc();
4531 
4532   auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4533                                MaybeAlign &Alignment) {
4534     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4535     Src0 = I.getArgOperand(0);
4536     Ptr = I.getArgOperand(1);
4537     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue();
4538     Mask = I.getArgOperand(3);
4539   };
4540   auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4541                                     MaybeAlign &Alignment) {
4542     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4543     Src0 = I.getArgOperand(0);
4544     Ptr = I.getArgOperand(1);
4545     Mask = I.getArgOperand(2);
4546     Alignment = std::nullopt;
4547   };
4548 
4549   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4550   MaybeAlign Alignment;
4551   if (IsCompressing)
4552     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4553   else
4554     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4555 
4556   SDValue Ptr = getValue(PtrOperand);
4557   SDValue Src0 = getValue(Src0Operand);
4558   SDValue Mask = getValue(MaskOperand);
4559   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4560 
4561   EVT VT = Src0.getValueType();
4562   if (!Alignment)
4563     Alignment = DAG.getEVTAlign(VT);
4564 
4565   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4566       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
4567       MemoryLocation::UnknownSize, *Alignment, I.getAAMetadata());
4568   SDValue StoreNode =
4569       DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4570                          ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4571   DAG.setRoot(StoreNode);
4572   setValue(&I, StoreNode);
4573 }
4574 
4575 // Get a uniform base for the Gather/Scatter intrinsic.
4576 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4577 // We try to represent it as a base pointer + vector of indices.
4578 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4579 // The first operand of the GEP may be a single pointer or a vector of pointers.
4580 // Example:
4581 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4582 //  or
4583 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
4584 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4585 //
4586 // When the first GEP operand is a single pointer, it is the uniform base we
4587 // are looking for. If the first operand of the GEP is a splat vector, we
4588 // extract the splat value and use it as the uniform base.
4589 // In all other cases the function returns 'false'.
4590 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4591                            ISD::MemIndexType &IndexType, SDValue &Scale,
4592                            SelectionDAGBuilder *SDB, const BasicBlock *CurBB,
4593                            uint64_t ElemSize) {
4594   SelectionDAG& DAG = SDB->DAG;
4595   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4596   const DataLayout &DL = DAG.getDataLayout();
4597 
4598   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4599 
4600   // Handle splat constant pointer.
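  // e.g. a <4 x ptr> operand that is a constant splat of @g yields
  // Base = @g, an all-zero index vector, and a scale of 1.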
4601   if (auto *C = dyn_cast<Constant>(Ptr)) {
4602     C = C->getSplatValue();
4603     if (!C)
4604       return false;
4605 
4606     Base = SDB->getValue(C);
4607 
4608     ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4609     EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4610     Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4611     IndexType = ISD::SIGNED_SCALED;
4612     Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4613     return true;
4614   }
4615 
4616   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4617   if (!GEP || GEP->getParent() != CurBB)
4618     return false;
4619 
4620   if (GEP->getNumOperands() != 2)
4621     return false;
4622 
4623   const Value *BasePtr = GEP->getPointerOperand();
4624   const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4625 
4626   // Make sure the base is scalar and the index is a vector.
4627   if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4628     return false;
4629 
4630   TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
4631   if (ScaleVal.isScalable())
4632     return false;
4633 
4634   // Target may not support the required addressing mode.
4635   if (ScaleVal != 1 &&
4636       !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize))
4637     return false;
4638 
4639   Base = SDB->getValue(BasePtr);
4640   Index = SDB->getValue(IndexVal);
4641   IndexType = ISD::SIGNED_SCALED;
4642 
4643   Scale =
4644       DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4645   return true;
4646 }
4647 
4648 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4649   SDLoc sdl = getCurSDLoc();
4650 
4651   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4652   const Value *Ptr = I.getArgOperand(1);
4653   SDValue Src0 = getValue(I.getArgOperand(0));
4654   SDValue Mask = getValue(I.getArgOperand(3));
4655   EVT VT = Src0.getValueType();
4656   Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4657                         ->getMaybeAlignValue()
4658                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4659   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4660 
4661   SDValue Base;
4662   SDValue Index;
4663   ISD::MemIndexType IndexType;
4664   SDValue Scale;
4665   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4666                                     I.getParent(), VT.getScalarStoreSize());
4667 
4668   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4669   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4670       MachinePointerInfo(AS), MachineMemOperand::MOStore,
4671       // TODO: Make MachineMemOperands aware of scalable
4672       // vectors.
4673       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata());
4674   if (!UniformBase) {
4675     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4676     Index = getValue(Ptr);
4677     IndexType = ISD::SIGNED_SCALED;
4678     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4679   }
4680 
4681   EVT IdxVT = Index.getValueType();
4682   EVT EltTy = IdxVT.getVectorElementType();
4683   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4684     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4685     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4686   }
4687 
4688   SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4689   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4690                                          Ops, MMO, IndexType, false);
4691   DAG.setRoot(Scatter);
4692   setValue(&I, Scatter);
4693 }
4694 
4695 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4696   SDLoc sdl = getCurSDLoc();
4697 
4698   auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4699                               MaybeAlign &Alignment) {
4700     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4701     Ptr = I.getArgOperand(0);
4702     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4703     Mask = I.getArgOperand(2);
4704     Src0 = I.getArgOperand(3);
4705   };
4706   auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4707                                  MaybeAlign &Alignment) {
4708     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4709     Ptr = I.getArgOperand(0);
4710     Alignment = std::nullopt;
4711     Mask = I.getArgOperand(1);
4712     Src0 = I.getArgOperand(2);
4713   };
4714 
4715   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4716   MaybeAlign Alignment;
4717   if (IsExpanding)
4718     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4719   else
4720     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4721 
4722   SDValue Ptr = getValue(PtrOperand);
4723   SDValue Src0 = getValue(Src0Operand);
4724   SDValue Mask = getValue(MaskOperand);
4725   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4726 
4727   EVT VT = Src0.getValueType();
4728   if (!Alignment)
4729     Alignment = DAG.getEVTAlign(VT);
4730 
4731   AAMDNodes AAInfo = I.getAAMetadata();
4732   const MDNode *Ranges = getRangeMetadata(I);
4733 
4734   // Do not serialize masked loads of constant memory with anything.
4735   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
4736   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4737 
4738   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4739 
4740   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4741       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
4742       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
4743 
4744   SDValue Load =
4745       DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4746                         ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4747   if (AddToChain)
4748     PendingLoads.push_back(Load.getValue(1));
4749   setValue(&I, Load);
4750 }
4751 
4752 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4753   SDLoc sdl = getCurSDLoc();
4754 
4755   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4756   const Value *Ptr = I.getArgOperand(0);
4757   SDValue Src0 = getValue(I.getArgOperand(3));
4758   SDValue Mask = getValue(I.getArgOperand(2));
4759 
4760   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4761   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4762   Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
4763                         ->getMaybeAlignValue()
4764                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4765 
4766   const MDNode *Ranges = getRangeMetadata(I);
4767 
4768   SDValue Root = DAG.getRoot();
4769   SDValue Base;
4770   SDValue Index;
4771   ISD::MemIndexType IndexType;
4772   SDValue Scale;
4773   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4774                                     I.getParent(), VT.getScalarStoreSize());
4775   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4776   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4777       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
4778       // TODO: Make MachineMemOperands aware of scalable
4779       // vectors.
4780       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);
4781 
4782   if (!UniformBase) {
4783     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4784     Index = getValue(Ptr);
4785     IndexType = ISD::SIGNED_SCALED;
4786     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4787   }
4788 
4789   EVT IdxVT = Index.getValueType();
4790   EVT EltTy = IdxVT.getVectorElementType();
4791   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4792     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4793     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4794   }
4795 
4796   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4797   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4798                                        Ops, MMO, IndexType, ISD::NON_EXTLOAD);
4799 
4800   PendingLoads.push_back(Gather.getValue(1));
4801   setValue(&I, Gather);
4802 }
4803 
4804 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4805   SDLoc dl = getCurSDLoc();
4806   AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4807   AtomicOrdering FailureOrdering = I.getFailureOrdering();
4808   SyncScope::ID SSID = I.getSyncScopeID();
4809 
4810   SDValue InChain = getRoot();
4811 
4812   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4813   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
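  // The cmpxchg node produces three results: the loaded value, an i1 success
  // flag for the comparison, and the output chain.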
4814 
4815   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4816   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4817 
4818   MachineFunction &MF = DAG.getMachineFunction();
4819   MachineMemOperand *MMO = MF.getMachineMemOperand(
4820       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4821       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
4822       FailureOrdering);
4823 
4824   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4825                                    dl, MemVT, VTs, InChain,
4826                                    getValue(I.getPointerOperand()),
4827                                    getValue(I.getCompareOperand()),
4828                                    getValue(I.getNewValOperand()), MMO);
4829 
4830   SDValue OutChain = L.getValue(2);
4831 
4832   setValue(&I, L);
4833   DAG.setRoot(OutChain);
4834 }
4835 
4836 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4837   SDLoc dl = getCurSDLoc();
4838   ISD::NodeType NT;
4839   switch (I.getOperation()) {
4840   default: llvm_unreachable("Unknown atomicrmw operation");
4841   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4842   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
4843   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
4844   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
4845   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4846   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
4847   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
4848   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
4849   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
4850   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4851   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4852   case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4853   case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4854   case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break;
4855   case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break;
4856   case AtomicRMWInst::UIncWrap:
4857     NT = ISD::ATOMIC_LOAD_UINC_WRAP;
4858     break;
4859   case AtomicRMWInst::UDecWrap:
4860     NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
4861     break;
4862   }
4863   AtomicOrdering Ordering = I.getOrdering();
4864   SyncScope::ID SSID = I.getSyncScopeID();
4865 
4866   SDValue InChain = getRoot();
4867 
4868   auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4869   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4870   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4871 
4872   MachineFunction &MF = DAG.getMachineFunction();
4873   MachineMemOperand *MMO = MF.getMachineMemOperand(
4874       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4875       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
4876 
4877   SDValue L =
4878     DAG.getAtomic(NT, dl, MemVT, InChain,
4879                   getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4880                   MMO);
4881 
4882   SDValue OutChain = L.getValue(1);
4883 
4884   setValue(&I, L);
4885   DAG.setRoot(OutChain);
4886 }
4887 
4888 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4889   SDLoc dl = getCurSDLoc();
4890   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4891   SDValue Ops[3];
4892   Ops[0] = getRoot();
4893   Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4894                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4895   Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4896                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4897   SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
4898   setValue(&I, N);
4899   DAG.setRoot(N);
4900 }
4901 
4902 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4903   SDLoc dl = getCurSDLoc();
4904   AtomicOrdering Order = I.getOrdering();
4905   SyncScope::ID SSID = I.getSyncScopeID();
4906 
4907   SDValue InChain = getRoot();
4908 
4909   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4910   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4911   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4912 
4913   if (!TLI.supportsUnalignedAtomics() &&
4914       I.getAlign().value() < MemVT.getSizeInBits() / 8)
4915     report_fatal_error("Cannot generate unaligned atomic load");
4916 
4917   auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4918 
4919   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4920       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4921       I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
4922 
4923   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4924 
4925   SDValue Ptr = getValue(I.getPointerOperand());
4926   SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4927                             Ptr, MMO);
4928 
4929   SDValue OutChain = L.getValue(1);
4930   if (MemVT != VT)
4931     L = DAG.getPtrExtOrTrunc(L, dl, VT);
4932 
4933   setValue(&I, L);
4934   DAG.setRoot(OutChain);
4935 }
4936 
4937 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4938   SDLoc dl = getCurSDLoc();
4939 
4940   AtomicOrdering Ordering = I.getOrdering();
4941   SyncScope::ID SSID = I.getSyncScopeID();
4942 
4943   SDValue InChain = getRoot();
4944 
4945   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4946   EVT MemVT =
4947       TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4948 
4949   if (!TLI.supportsUnalignedAtomics() &&
4950       I.getAlign().value() < MemVT.getSizeInBits() / 8)
4951     report_fatal_error("Cannot generate unaligned atomic store");
4952 
4953   auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4954 
4955   MachineFunction &MF = DAG.getMachineFunction();
4956   MachineMemOperand *MMO = MF.getMachineMemOperand(
4957       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4958       I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
4959 
4960   SDValue Val = getValue(I.getValueOperand());
4961   if (Val.getValueType() != MemVT)
4962     Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4963   SDValue Ptr = getValue(I.getPointerOperand());
4964 
4965   SDValue OutChain =
4966       DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
4967 
4968   setValue(&I, OutChain);
4969   DAG.setRoot(OutChain);
4970 }
4971 
4972 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4973 /// node.
4974 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4975                                                unsigned Intrinsic) {
4976   // Ignore the callsite's attributes. A specific call site may be marked with
4977   // readnone, but the lowering code will expect the chain based on the
4978   // definition.
4979   const Function *F = I.getCalledFunction();
4980   bool HasChain = !F->doesNotAccessMemory();
4981   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4982 
4983   // Build the operand list.
4984   SmallVector<SDValue, 8> Ops;
4985   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4986     if (OnlyLoad) {
4987       // We don't need to serialize loads against other loads.
4988       Ops.push_back(DAG.getRoot());
4989     } else {
4990       Ops.push_back(getRoot());
4991     }
4992   }
4993 
4994   // Info is set by getTgtMemIntrinsic
4995   TargetLowering::IntrinsicInfo Info;
4996   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4997   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4998                                                DAG.getMachineFunction(),
4999                                                Intrinsic);
5000 
5001   // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
5002   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
5003       Info.opc == ISD::INTRINSIC_W_CHAIN)
5004     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
5005                                         TLI.getPointerTy(DAG.getDataLayout())));
5006 
5007   // Add all operands of the call to the operand list.
5008   for (unsigned i = 0, e = I.arg_size(); i != e; ++i) {
5009     const Value *Arg = I.getArgOperand(i);
5010     if (!I.paramHasAttr(i, Attribute::ImmArg)) {
5011       Ops.push_back(getValue(Arg));
5012       continue;
5013     }
5014 
5015     // Use TargetConstant instead of a regular constant for immarg.
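    // (A TargetConstant is never legalized or materialized into a register,
    // so the immediate value remains a constant operand all the way to
    // instruction selection.)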
5016     EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
5017     if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5018       assert(CI->getBitWidth() <= 64 &&
5019              "large intrinsic immediates not handled");
5020       Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
5021     } else {
5022       Ops.push_back(
5023           DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
5024     }
5025   }
5026 
5027   SmallVector<EVT, 4> ValueVTs;
5028   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
5029 
5030   if (HasChain)
5031     ValueVTs.push_back(MVT::Other);
5032 
5033   SDVTList VTs = DAG.getVTList(ValueVTs);
5034 
5035   // Propagate fast-math-flags from IR to node(s).
5036   SDNodeFlags Flags;
5037   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
5038     Flags.copyFMF(*FPMO);
5039   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
5040 
5041   // Create the node.
5042   SDValue Result;
5043   // In some cases, custom collection of operands from CallInst I may be needed.
5044   TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
5045   if (IsTgtIntrinsic) {
5046     // This is a target intrinsic that touches memory
5047     //
5048     // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
5049     //       didn't yield anything useful.
5050     MachinePointerInfo MPI;
5051     if (Info.ptrVal)
5052       MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
5053     else if (Info.fallbackAddressSpace)
5054       MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
5055     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops,
5056                                      Info.memVT, MPI, Info.align, Info.flags,
5057                                      Info.size, I.getAAMetadata());
5058   } else if (!HasChain) {
5059     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
5060   } else if (!I.getType()->isVoidTy()) {
5061     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
5062   } else {
5063     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
5064   }
5065 
5066   if (HasChain) {
5067     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
5068     if (OnlyLoad)
5069       PendingLoads.push_back(Chain);
5070     else
5071       DAG.setRoot(Chain);
5072   }
5073 
5074   if (!I.getType()->isVoidTy()) {
5075     if (!isa<VectorType>(I.getType()))
5076       Result = lowerRangeToAssertZExt(DAG, I, Result);
5077 
5078     MaybeAlign Alignment = I.getRetAlign();
5079 
5080     // Insert `assertalign` node if there's an alignment.
5081     if (InsertAssertAlign && Alignment) {
5082       Result =
5083           DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
5084     }
5085 
5086     setValue(&I, Result);
5087   }
5088 }
5089 
5090 /// GetSignificand - Get the significand and build it into a floating-point
5091 /// number with exponent of 1:
5092 ///
5093 ///   Op = (Op & 0x007fffff) | 0x3f800000;
5094 ///
5095 /// where Op is the i32 bit representation of a floating-point value.
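     ///
     /// For example (illustrative, not part of the lowering): for
     /// Op = 0x41300000 (11.0f), the result is 0x3fb00000 (1.375f),
     /// since 11.0 = 1.375 * 2^3.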
5096 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
5097   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5098                            DAG.getConstant(0x007fffff, dl, MVT::i32));
5099   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
5100                            DAG.getConstant(0x3f800000, dl, MVT::i32));
5101   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
5102 }
5103 
5104 /// GetExponent - Get the exponent:
5105 ///
5106 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
5107 ///
5108 /// where Op is the i32 bit representation of a floating-point value.
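     ///
     /// For example (illustrative): for Op = 0x41300000 (11.0f),
     /// ((0x41300000 & 0x7f800000) >> 23) - 127 = 130 - 127 = 3, giving 3.0f.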
5109 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
5110                            const TargetLowering &TLI, const SDLoc &dl) {
5111   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5112                            DAG.getConstant(0x7f800000, dl, MVT::i32));
5113   SDValue t1 = DAG.getNode(
5114       ISD::SRL, dl, MVT::i32, t0,
5115       DAG.getConstant(23, dl,
5116                       TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
5117   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
5118                            DAG.getConstant(127, dl, MVT::i32));
5119   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
5120 }
5121 
5122 /// getF32Constant - Get 32-bit floating point constant.
5123 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
5124                               const SDLoc &dl) {
5125   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
5126                            MVT::f32);
5127 }
5128 
5129 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
5130                                        SelectionDAG &DAG) {
5131   // TODO: What fast-math-flags should be set on the floating-point nodes?
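       //
       // The expansion below splits t0 into integer and fractional parts:
       //   2^t0 = 2^IntegerPartOfX * 2^(t0 - IntegerPartOfX)
       // A short polynomial approximates 2^f over the fractional range, and
       // the integer part is then folded directly into the exponent field of
       // the result's bit pattern.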
5132 
5133   //   IntegerPartOfX = (int32_t)t0;
5134   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
5135 
5136   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
5137   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
5138   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
5139 
5140   //   IntegerPartOfX <<= 23;
5141   IntegerPartOfX =
5142       DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
5143                   DAG.getConstant(23, dl,
5144                                   DAG.getTargetLoweringInfo().getShiftAmountTy(
5145                                       MVT::i32, DAG.getDataLayout())));
5146 
5147   SDValue TwoToFractionalPartOfX;
5148   if (LimitFloatPrecision <= 6) {
5149     // For floating-point precision of 6:
5150     //
5151     //   TwoToFractionalPartOfX =
5152     //     0.997535578f +
5153     //       (0.735607626f + 0.252464424f * x) * x;
5154     //
5155     // error 0.0144103317, which is 6 bits
5156     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5157                              getF32Constant(DAG, 0x3e814304, dl));
5158     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5159                              getF32Constant(DAG, 0x3f3c50c8, dl));
5160     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5161     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5162                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
5163   } else if (LimitFloatPrecision <= 12) {
5164     // For floating-point precision of 12:
5165     //
5166     //   TwoToFractionalPartOfX =
5167     //     0.999892986f +
5168     //       (0.696457318f +
5169     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
5170     //
5171     // error 0.000107046256, which is 13 to 14 bits
5172     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5173                              getF32Constant(DAG, 0x3da235e3, dl));
5174     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5175                              getF32Constant(DAG, 0x3e65b8f3, dl));
5176     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5177     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5178                              getF32Constant(DAG, 0x3f324b07, dl));
5179     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5180     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5181                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
5182   } else { // LimitFloatPrecision <= 18
5183     // For floating-point precision of 18:
5184     //
5185     //   TwoToFractionalPartOfX =
5186     //     0.999999982f +
5187     //       (0.693148872f +
5188     //         (0.240227044f +
5189     //           (0.554906021e-1f +
5190     //             (0.961591928e-2f +
5191     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5192     // error 2.47208000*10^(-7), which is better than 18 bits
5193     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5194                              getF32Constant(DAG, 0x3924b03e, dl));
5195     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5196                              getF32Constant(DAG, 0x3ab24b87, dl));
5197     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5198     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5199                              getF32Constant(DAG, 0x3c1d8c17, dl));
5200     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5201     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5202                              getF32Constant(DAG, 0x3d634a1d, dl));
5203     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5204     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5205                              getF32Constant(DAG, 0x3e75fe14, dl));
5206     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5207     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5208                               getF32Constant(DAG, 0x3f317234, dl));
5209     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5210     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
5211                                          getF32Constant(DAG, 0x3f800000, dl));
5212   }
5213 
5214   // Add the exponent into the result in integer domain.
5215   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5216   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5217                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5218 }
5219 
5220 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
5221 /// limited-precision mode.
5222 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5223                          const TargetLowering &TLI, SDNodeFlags Flags) {
5224   if (Op.getValueType() == MVT::f32 &&
5225       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5226 
5227     // Put the exponent in the right bit position for later addition to the
5228     // final result:
5229     //
5230     // t0 = Op * log2(e)
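         //
         // since exp(x) = 2^(x * log2(e)), this reduces exp to the exp2 expansion.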
5231 
5232     // TODO: What fast-math-flags should be set here?
5233     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5234                              DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5235     return getLimitedPrecisionExp2(t0, dl, DAG);
5236   }
5237 
5238   // No special expansion.
5239   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
5240 }
5241 
5242 /// expandLog - Lower a log intrinsic. Handles the special sequences for
5243 /// limited-precision mode.
5244 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5245                          const TargetLowering &TLI, SDNodeFlags Flags) {
5246   // TODO: What fast-math-flags should be set on the floating-point nodes?
5247 
5248   if (Op.getValueType() == MVT::f32 &&
5249       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
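         // Writing x = M * 2^E with M in [1,2), we have
         //   log(x) = E * ln(2) + log(M),
         // computed below as LogOfExponent + LogOfMantissa.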
5250     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5251 
5252     // Scale the exponent by log(2).
5253     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5254     SDValue LogOfExponent =
5255         DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5256                     DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
5257 
5258     // Get the significand and build it into a floating-point number with
5259     // exponent of 1.
5260     SDValue X = GetSignificand(DAG, Op1, dl);
5261 
5262     SDValue LogOfMantissa;
5263     if (LimitFloatPrecision <= 6) {
5264       // For floating-point precision of 6:
5265       //
5266       //   LogOfMantissa =
5267       //     -1.1609546f +
5268       //       (1.4034025f - 0.23903021f * x) * x;
5269       //
5270       // error 0.0034276066, which is better than 8 bits
5271       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5272                                getF32Constant(DAG, 0xbe74c456, dl));
5273       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5274                                getF32Constant(DAG, 0x3fb3a2b1, dl));
5275       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5276       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5277                                   getF32Constant(DAG, 0x3f949a29, dl));
5278     } else if (LimitFloatPrecision <= 12) {
5279       // For floating-point precision of 12:
5280       //
5281       //   LogOfMantissa =
5282       //     -1.7417939f +
5283       //       (2.8212026f +
5284       //         (-1.4699568f +
5285       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5286       //
5287       // error 0.000061011436, which is 14 bits
5288       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5289                                getF32Constant(DAG, 0xbd67b6d6, dl));
5290       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5291                                getF32Constant(DAG, 0x3ee4f4b8, dl));
5292       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5293       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5294                                getF32Constant(DAG, 0x3fbc278b, dl));
5295       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5296       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5297                                getF32Constant(DAG, 0x40348e95, dl));
5298       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5299       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5300                                   getF32Constant(DAG, 0x3fdef31a, dl));
5301     } else { // LimitFloatPrecision <= 18
5302       // For floating-point precision of 18:
5303       //
5304       //   LogOfMantissa =
5305       //     -2.1072184f +
5306       //       (4.2372794f +
5307       //         (-3.7029485f +
5308       //           (2.2781945f +
5309       //             (-0.87823314f +
5310       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5311       //
5312       // error 0.0000023660568, which is better than 18 bits
5313       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5314                                getF32Constant(DAG, 0xbc91e5ac, dl));
5315       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5316                                getF32Constant(DAG, 0x3e4350aa, dl));
5317       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5318       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5319                                getF32Constant(DAG, 0x3f60d3e3, dl));
5320       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5321       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5322                                getF32Constant(DAG, 0x4011cdf0, dl));
5323       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5324       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5325                                getF32Constant(DAG, 0x406cfd1c, dl));
5326       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5327       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5328                                getF32Constant(DAG, 0x408797cb, dl));
5329       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5330       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5331                                   getF32Constant(DAG, 0x4006dcab, dl));
5332     }
5333 
5334     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5335   }
5336 
5337   // No special expansion.
5338   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
5339 }
5340 
5341 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5342 /// limited-precision mode.
5343 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5344                           const TargetLowering &TLI, SDNodeFlags Flags) {
5345   // TODO: What fast-math-flags should be set on the floating-point nodes?
5346 
5347   if (Op.getValueType() == MVT::f32 &&
5348       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
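         // With x = M * 2^E and M in [1,2), log2(x) = E + log2(M), so the
         // exponent needs no scaling here.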
5349     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5350 
5351     // Get the exponent.
5352     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5353 
5354     // Get the significand and build it into a floating-point number with
5355     // exponent of 1.
5356     SDValue X = GetSignificand(DAG, Op1, dl);
5357 
5358     // Different possible minimax approximations of the significand in
5359     // floating-point, for various degrees of accuracy over [1,2].
5360     SDValue Log2ofMantissa;
5361     if (LimitFloatPrecision <= 6) {
5362       // For floating-point precision of 6:
5363       //
5364       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5365       //
5366       // error 0.0049451742, which is more than 7 bits
5367       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5368                                getF32Constant(DAG, 0xbeb08fe0, dl));
5369       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5370                                getF32Constant(DAG, 0x40019463, dl));
5371       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5372       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5373                                    getF32Constant(DAG, 0x3fd6633d, dl));
5374     } else if (LimitFloatPrecision <= 12) {
5375       // For floating-point precision of 12:
5376       //
5377       //   Log2ofMantissa =
5378       //     -2.51285454f +
5379       //       (4.07009056f +
5380       //         (-2.12067489f +
5381       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5382       //
5383       // error 0.0000876136000, which is better than 13 bits
5384       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5385                                getF32Constant(DAG, 0xbda7262e, dl));
5386       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5387                                getF32Constant(DAG, 0x3f25280b, dl));
5388       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5389       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5390                                getF32Constant(DAG, 0x4007b923, dl));
5391       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5392       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5393                                getF32Constant(DAG, 0x40823e2f, dl));
5394       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5395       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5396                                    getF32Constant(DAG, 0x4020d29c, dl));
5397     } else { // LimitFloatPrecision <= 18
5398       // For floating-point precision of 18:
5399       //
5400       //   Log2ofMantissa =
5401       //     -3.0400495f +
5402       //       (6.1129976f +
5403       //         (-5.3420409f +
5404       //           (3.2865683f +
5405       //             (-1.2669343f +
5406       //               (0.27515199f -
5407       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5408       //
5409       // error 0.0000018516, which is better than 18 bits
5410       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5411                                getF32Constant(DAG, 0xbcd2769e, dl));
5412       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5413                                getF32Constant(DAG, 0x3e8ce0b9, dl));
5414       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5415       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5416                                getF32Constant(DAG, 0x3fa22ae7, dl));
5417       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5418       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5419                                getF32Constant(DAG, 0x40525723, dl));
5420       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5421       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5422                                getF32Constant(DAG, 0x40aaf200, dl));
5423       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5424       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5425                                getF32Constant(DAG, 0x40c39dad, dl));
5426       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5427       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5428                                    getF32Constant(DAG, 0x4042902c, dl));
5429     }
5430 
5431     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5432   }
5433 
5434   // No special expansion.
5435   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5436 }
5437 
5438 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5439 /// limited-precision mode.
5440 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5441                            const TargetLowering &TLI, SDNodeFlags Flags) {
5442   // TODO: What fast-math-flags should be set on the floating-point nodes?
5443 
5444   if (Op.getValueType() == MVT::f32 &&
5445       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
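         // With x = M * 2^E and M in [1,2), log10(x) = E * log10(2) + log10(M).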
5446     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5447 
5448     // Scale the exponent by log10(2) [0.30102999f].
5449     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5450     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5451                                         getF32Constant(DAG, 0x3e9a209a, dl));
5452 
5453     // Get the significand and build it into a floating-point number with
5454     // exponent of 1.
5455     SDValue X = GetSignificand(DAG, Op1, dl);
5456 
5457     SDValue Log10ofMantissa;
5458     if (LimitFloatPrecision <= 6) {
5459       // For floating-point precision of 6:
5460       //
5461       //   Log10ofMantissa =
5462       //     -0.50419619f +
5463       //       (0.60948995f - 0.10380950f * x) * x;
5464       //
5465       // error 0.0014886165, which is 6 bits
5466       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5467                                getF32Constant(DAG, 0xbdd49a13, dl));
5468       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5469                                getF32Constant(DAG, 0x3f1c0789, dl));
5470       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5471       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5472                                     getF32Constant(DAG, 0x3f011300, dl));
5473     } else if (LimitFloatPrecision <= 12) {
5474       // For floating-point precision of 12:
5475       //
5476       //   Log10ofMantissa =
5477       //     -0.64831180f +
5478       //       (0.91751397f +
5479       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5480       //
5481       // error 0.00019228036, which is better than 12 bits
5482       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5483                                getF32Constant(DAG, 0x3d431f31, dl));
5484       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5485                                getF32Constant(DAG, 0x3ea21fb2, dl));
5486       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5487       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5488                                getF32Constant(DAG, 0x3f6ae232, dl));
5489       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5490       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5491                                     getF32Constant(DAG, 0x3f25f7c3, dl));
5492     } else { // LimitFloatPrecision <= 18
5493       // For floating-point precision of 18:
5494       //
5495       //   Log10ofMantissa =
5496       //     -0.84299375f +
5497       //       (1.5327582f +
5498       //         (-1.0688956f +
5499       //           (0.49102474f +
5500       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5501       //
5502       // error 0.0000037995730, which is better than 18 bits
5503       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5504                                getF32Constant(DAG, 0x3c5d51ce, dl));
5505       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5506                                getF32Constant(DAG, 0x3e00685a, dl));
5507       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5508       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5509                                getF32Constant(DAG, 0x3efb6798, dl));
5510       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5511       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5512                                getF32Constant(DAG, 0x3f88d192, dl));
5513       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5514       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5515                                getF32Constant(DAG, 0x3fc4316c, dl));
5516       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5517       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5518                                     getF32Constant(DAG, 0x3f57ce70, dl));
5519     }
5520 
5521     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5522   }
5523 
5524   // No special expansion.
5525   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5526 }
5527 
5528 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5529 /// limited-precision mode.
5530 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5531                           const TargetLowering &TLI, SDNodeFlags Flags) {
5532   if (Op.getValueType() == MVT::f32 &&
5533       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5534     return getLimitedPrecisionExp2(Op, dl, DAG);
5535 
5536   // No special expansion.
5537   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5538 }
5539 
5540 /// expandPow - Lower a pow intrinsic. Handles the special sequence for
5541 /// limited-precision mode when the base is a constant 10.0f.
5542 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5543                          SelectionDAG &DAG, const TargetLowering &TLI,
5544                          SDNodeFlags Flags) {
5545   bool IsExp10 = false;
5546   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5547       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5548     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5549       APFloat Ten(10.0f);
5550       IsExp10 = LHSC->isExactlyValue(Ten);
5551     }
5552   }
5553 
5554   // TODO: What fast-math-flags should be set on the FMUL node?
5555   if (IsExp10) {
5556     // Put the exponent in the right bit position for later addition to the
5557     // final result:
5558     //
5559     //   #define LOG2OF10 3.3219281f
5560     //   t0 = Op * LOG2OF10;
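         //
         // since pow(10.0, x) = exp2(x * log2(10)).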
5561     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5562                              getF32Constant(DAG, 0x40549a78, dl));
5563     return getLimitedPrecisionExp2(t0, dl, DAG);
5564   }
5565 
5566   // No special expansion.
5567   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5568 }
5569 
5570 /// ExpandPowI - Expand a llvm.powi intrinsic.
5571 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5572                           SelectionDAG &DAG) {
5573   // If RHS is a constant, we can expand this out to a multiplication tree if
5574   // it's beneficial on the target, otherwise we end up lowering to a call to
5575   // __powidf2 (for example).
5576   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5577     unsigned Val = RHSC->getSExtValue();
5578 
5579     // powi(x, 0) -> 1.0
5580     if (Val == 0)
5581       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5582 
5583     if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI(
5584             Val, DAG.shouldOptForSize())) {
5585       // Get the exponent as a positive value.
5586       if ((int)Val < 0)
5587         Val = -Val;
5588       // We use the simple binary decomposition method to generate the multiply
5589       // sequence.  There are more optimal ways to do this (for example,
5590       // powi(x,15) generates one more multiply than it should), but this has
5591       // the benefit of being both really simple and much better than a libcall.
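           // For example (illustrative): for powi(x, 13), 13 = 0b1101, the loop
           // below builds Res = x * x^4 * x^8, with CurSquare stepping through
           // x, x^2, x^4, x^8.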
5592       SDValue Res; // Logically starts equal to 1.0
5593       SDValue CurSquare = LHS;
5594       // TODO: Intrinsics should have fast-math-flags that propagate to these
5595       // nodes.
5596       while (Val) {
5597         if (Val & 1) {
5598           if (Res.getNode())
5599             Res =
5600                 DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5601           else
5602             Res = CurSquare; // 1.0*CurSquare.
5603         }
5604 
5605         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5606                                 CurSquare, CurSquare);
5607         Val >>= 1;
5608       }
5609 
5610       // If the original exponent was negative, invert the result, producing 1/x^n.
5611       if (RHSC->getSExtValue() < 0)
5612         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5613                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5614       return Res;
5615     }
5616   }
5617 
5618   // Otherwise, expand to a libcall.
5619   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5620 }
5621 
5622 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5623                             SDValue LHS, SDValue RHS, SDValue Scale,
5624                             SelectionDAG &DAG, const TargetLowering &TLI) {
5625   EVT VT = LHS.getValueType();
5626   bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5627   bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5628   LLVMContext &Ctx = *DAG.getContext();
5629 
5630   // If the type is legal but the operation isn't, this node might survive all
5631   // the way to operation legalization. If we end up there and we do not have
5632   // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5633   // node.
5634 
5635   // Coax the legalizer into expanding the node during type legalization instead
5636   // by bumping the size by one bit. This will force it to Promote, enabling the
5637   // early expansion and avoiding the need to expand later.
5638 
5639   // We don't have to do this if Scale is 0; that can always be expanded, unless
5640   // it's a saturating signed operation. Those can experience true integer
5641   // division overflow, a case which we must avoid.
5642 
5643   // FIXME: We wouldn't have to do this (or any of the early
5644   // expansion/promotion) if it was possible to expand a libcall of an
5645   // illegal type during operation legalization. But it's not, so things
5646   // get a bit hacky.
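       // For example (illustrative): if i32 is legal but i32 sdiv.fix.sat with
       // this scale is not, the operands are extended to i33 below; the type
       // legalizer then promotes i33 to the next legal type, where the
       // operation can be expanded safely.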
5647   unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
5648   if ((ScaleInt > 0 || (Saturating && Signed)) &&
5649       (TLI.isTypeLegal(VT) ||
5650        (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5651     TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5652         Opcode, VT, ScaleInt);
5653     if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5654       EVT PromVT;
5655       if (VT.isScalarInteger())
5656         PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5657       else if (VT.isVector()) {
5658         PromVT = VT.getVectorElementType();
5659         PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5660         PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5661       } else
5662         llvm_unreachable("Wrong VT for DIVFIX?");
5663       LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
5664       RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
5665       EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5666       // For saturating operations, we need to shift up the LHS to get the
5667       // proper saturation width, and then shift down again afterwards.
5668       if (Saturating)
5669         LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5670                           DAG.getConstant(1, DL, ShiftTy));
5671       SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5672       if (Saturating)
5673         Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5674                           DAG.getConstant(1, DL, ShiftTy));
5675       return DAG.getZExtOrTrunc(Res, DL, VT);
5676     }
5677   }
5678 
5679   return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5680 }
5681 
5682 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5683 // bitcasted, or split argument. Returns a list of <Register, size in bits>.
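     //
     // For example (illustrative): for an i64 argument split across two i32
     // registers, N is typically a BUILD_PAIR of two CopyFromReg nodes, and
     // both registers are returned, each with a size of 32 bits.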
5684 static void
5685 getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, TypeSize>> &Regs,
5686                      const SDValue &N) {
5687   switch (N.getOpcode()) {
5688   case ISD::CopyFromReg: {
5689     SDValue Op = N.getOperand(1);
5690     Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5691                       Op.getValueType().getSizeInBits());
5692     return;
5693   }
5694   case ISD::BITCAST:
5695   case ISD::AssertZext:
5696   case ISD::AssertSext:
5697   case ISD::TRUNCATE:
5698     getUnderlyingArgRegs(Regs, N.getOperand(0));
5699     return;
5700   case ISD::BUILD_PAIR:
5701   case ISD::BUILD_VECTOR:
5702   case ISD::CONCAT_VECTORS:
5703     for (SDValue Op : N->op_values())
5704       getUnderlyingArgRegs(Regs, Op);
5705     return;
5706   default:
5707     return;
5708   }
5709 }
5710 
5711 /// If the DbgValueInst is a dbg_value of a function argument, create the
5712 /// corresponding DBG_VALUE machine instruction for it now.  At the end of
5713 /// instruction selection, they will be inserted into the entry BB.
5714 /// We don't currently support this for variadic dbg_values, as they shouldn't
5715 /// appear for function arguments or in the prologue.
5716 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5717     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5718     DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
5719   const Argument *Arg = dyn_cast<Argument>(V);
5720   if (!Arg)
5721     return false;
5722 
5723   MachineFunction &MF = DAG.getMachineFunction();
5724   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5725 
5726   // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
5727   // we've been asked to pursue.
5728   auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
5729                               bool Indirect) {
5730     if (Reg.isVirtual() && MF.useDebugInstrRef()) {
5731       // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
5732       // pointing at the VReg, which will be patched up later.
5733       auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
5734       SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
5735           /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
5736           /* isKill */ false, /* isDead */ false,
5737           /* isUndef */ false, /* isEarlyClobber */ false,
5738           /* SubReg */ 0, /* isDebug */ true)});
5739 
5740       auto *NewDIExpr = FragExpr;
5741       // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
5742       // the DIExpression.
5743       if (Indirect)
5744         NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
5745       SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
5746       NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
5747       return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
5748     } else {
5749       // Create a completely standard DBG_VALUE.
5750       auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
5751       return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
5752     }
5753   };
5754 
5755   if (Kind == FuncArgumentDbgValueKind::Value) {
5756     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5757     // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5758     // the entry block.
5759     bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5760     if (!IsInEntryBlock)
5761       return false;
5762 
5763     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5764     // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
5765     // variable that is also a parameter of the current function.
5766     //
5767     // Although, if we are at the top of the entry block already, we can still
5768     // emit using ArgDbgValue. This might catch some situations when the
5769     // dbg.value refers to an argument that isn't used in the entry block, so
5770     // any CopyToReg node would be optimized out and the only way to express
5771     // this DBG_VALUE is by using the physical reg (or FI) as done in this
5772     // method.
5776     bool VariableIsFunctionInputArg = Variable->isParameter() &&
5777         !DL->getInlinedAt();
5778     bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5779     if (!IsInPrologue && !VariableIsFunctionInputArg)
5780       return false;
5781 
5782     // Here we assume that a function argument at the IR level can only be used
5783     // to describe one input parameter at the source level. If we, for example,
5784     // have source code like this:
5785     //
5786     //    struct A { long x, y; };
5787     //    void foo(struct A a, long b) {
5788     //      ...
5789     //      b = a.x;
5790     //      ...
5791     //    }
5792     //
5793     // and IR like this
5794     //
5795     //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
5796     //  entry:
5797     //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5798     //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5799     //    call void @llvm.dbg.value(metadata i32 %b, "b",
5800     //    ...
5801     //    call void @llvm.dbg.value(metadata i32 %a1, "b"
5802     //    ...
5803     //
5804     // then the last dbg.value is describing a parameter "b" using a value that
5805     // is an argument. But since we have already used %a1 to describe a parameter
5806     // we should not handle that last dbg.value here (that would result in an
5807     // incorrect hoisting of the DBG_VALUE to the function entry).
5808     // Notice that we allow one dbg.value per IR-level argument, to accommodate
5809     // the situation with fragments above.
5810     if (VariableIsFunctionInputArg) {
5811       unsigned ArgNo = Arg->getArgNo();
5812       if (ArgNo >= FuncInfo.DescribedArgs.size())
5813         FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5814       else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5815         return false;
5816       FuncInfo.DescribedArgs.set(ArgNo);
5817     }
5818   }
5819 
5820   bool IsIndirect = false;
5821   std::optional<MachineOperand> Op;
5822   // Some arguments' frame index is recorded during argument lowering.
5823   int FI = FuncInfo.getArgumentFrameIndex(Arg);
5824   if (FI != std::numeric_limits<int>::max())
5825     Op = MachineOperand::CreateFI(FI);
5826 
5827   SmallVector<std::pair<unsigned, TypeSize>, 8> ArgRegsAndSizes;
5828   if (!Op && N.getNode()) {
5829     getUnderlyingArgRegs(ArgRegsAndSizes, N);
5830     Register Reg;
5831     if (ArgRegsAndSizes.size() == 1)
5832       Reg = ArgRegsAndSizes.front().first;
5833 
5834     if (Reg && Reg.isVirtual()) {
5835       MachineRegisterInfo &RegInfo = MF.getRegInfo();
5836       Register PR = RegInfo.getLiveInPhysReg(Reg);
5837       if (PR)
5838         Reg = PR;
5839     }
5840     if (Reg) {
5841       Op = MachineOperand::CreateReg(Reg, false);
5842       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5843     }
5844   }
5845 
5846   if (!Op && N.getNode()) {
5847     // Check if frame index is available.
5848     SDValue LCandidate = peekThroughBitcasts(N);
5849     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5850       if (FrameIndexSDNode *FINode =
5851           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5852         Op = MachineOperand::CreateFI(FINode->getIndex());
5853   }
5854 
5855   if (!Op) {
5856     // Create a DBG_VALUE fragment for each register in SplitRegs.
5857     auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<unsigned, TypeSize>>
5858                                          SplitRegs) {
5859       unsigned Offset = 0;
5860       for (const auto &RegAndSize : SplitRegs) {
5861         // If the expression is already a fragment, the current register
5862         // offset+size might extend beyond the fragment. In this case, only
5863         // the register bits that are inside the fragment are relevant.
5864         int RegFragmentSizeInBits = RegAndSize.second;
5865         if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5866           uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5867           // The register is entirely outside the expression fragment,
5868           // so is irrelevant for debug info.
5869           if (Offset >= ExprFragmentSizeInBits)
5870             break;
5871           // The register is partially outside the expression fragment, only
5872           // the low bits within the fragment are relevant for debug info.
5873           if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
5874             RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
5875           }
5876         }
5877 
5878         auto FragmentExpr = DIExpression::createFragmentExpression(
5879             Expr, Offset, RegFragmentSizeInBits);
5880         Offset += RegAndSize.second;
5881         // If a valid fragment expression cannot be created, the variable's
5882         // correct value cannot be determined and so it is set as Undef.
5883         if (!FragmentExpr) {
5884           SDDbgValue *SDV = DAG.getConstantDbgValue(
5885               Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
5886           DAG.AddDbgValue(SDV, false);
5887           continue;
5888         }
5889         MachineInstr *NewMI =
5890             MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
5891                              Kind != FuncArgumentDbgValueKind::Value);
5892         FuncInfo.ArgDbgValues.push_back(NewMI);
5893       }
5894     };
5895 
5896     // Check if ValueMap has reg number.
5897     DenseMap<const Value *, Register>::const_iterator
5898       VMI = FuncInfo.ValueMap.find(V);
5899     if (VMI != FuncInfo.ValueMap.end()) {
5900       const auto &TLI = DAG.getTargetLoweringInfo();
5901       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5902                        V->getType(), std::nullopt);
5903       if (RFV.occupiesMultipleRegs()) {
5904         splitMultiRegDbgValue(RFV.getRegsAndSizes());
5905         return true;
5906       }
5907 
5908       Op = MachineOperand::CreateReg(VMI->second, false);
5909       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5910     } else if (ArgRegsAndSizes.size() > 1) {
5911       // This was split due to the calling convention, and no virtual register
5912       // mapping exists for the value.
5913       splitMultiRegDbgValue(ArgRegsAndSizes);
5914       return true;
5915     }
5916   }
5917 
5918   if (!Op)
5919     return false;
5920 
5921   assert(Variable->isValidLocationForIntrinsic(DL) &&
5922          "Expected inlined-at fields to agree");
5923   MachineInstr *NewMI = nullptr;
5924 
5925   if (Op->isReg())
5926     NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
5927   else
5928     NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
5929                     Variable, Expr);
5930 
5931   // Queue it to be inserted at the beginning of the entry block.
5932   FuncInfo.ArgDbgValues.push_back(NewMI);
5933   return true;
5934 }
5935 
5936 /// Return the appropriate SDDbgValue based on N.
5937 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5938                                              DILocalVariable *Variable,
5939                                              DIExpression *Expr,
5940                                              const DebugLoc &dl,
5941                                              unsigned DbgSDNodeOrder) {
5942   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5943     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5944     // stack slot locations.
5945     //
5946     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5947     // debug values here after optimization:
5948     //
5949     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
5950     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5951     //
5952     // Both describe the direct values of their associated variables.
5953     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5954                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5955   }
5956   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5957                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5958 }
5959 
5960 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5961   switch (Intrinsic) {
5962   case Intrinsic::smul_fix:
5963     return ISD::SMULFIX;
5964   case Intrinsic::umul_fix:
5965     return ISD::UMULFIX;
5966   case Intrinsic::smul_fix_sat:
5967     return ISD::SMULFIXSAT;
5968   case Intrinsic::umul_fix_sat:
5969     return ISD::UMULFIXSAT;
5970   case Intrinsic::sdiv_fix:
5971     return ISD::SDIVFIX;
5972   case Intrinsic::udiv_fix:
5973     return ISD::UDIVFIX;
5974   case Intrinsic::sdiv_fix_sat:
5975     return ISD::SDIVFIXSAT;
5976   case Intrinsic::udiv_fix_sat:
5977     return ISD::UDIVFIXSAT;
5978   default:
5979     llvm_unreachable("Unhandled fixed point intrinsic");
5980   }
5981 }
5982 
5983 void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
5984                                            const char *FunctionName) {
5985   assert(FunctionName && "FunctionName must not be nullptr");
5986   SDValue Callee = DAG.getExternalSymbol(
5987       FunctionName,
5988       DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
5989   LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
5990 }
5991 
5992 /// Given a @llvm.call.preallocated.setup, return the corresponding
5993 /// preallocated call.
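     ///
     /// A sketch of the IR shape this walks (illustrative, names hypothetical):
     ///
     ///   %t = call token @llvm.call.preallocated.setup(i32 1)
     ///   %a = call ptr @llvm.call.preallocated.arg(token %t, i32 0) preallocated(i32)
     ///   call void @foo(ptr preallocated(i32) %a) ["preallocated"(token %t)]
     ///
     /// Among the users of the setup token, the one that is not a
     /// call_preallocated_arg intrinsic (here, the call to @foo) is the
     /// corresponding preallocated call.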
5994 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
5995   assert(cast<CallBase>(PreallocatedSetup)
5996                  ->getCalledFunction()
5997                  ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
5998          "expected call_preallocated_setup Value");
5999   for (const auto *U : PreallocatedSetup->users()) {
6000     auto *UseCall = cast<CallBase>(U);
6001     const Function *Fn = UseCall->getCalledFunction();
6002     if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6003       return UseCall;
6004     }
6005   }
6006   llvm_unreachable("expected corresponding call to preallocated setup/arg");
6007 }
6008 
6009 /// If DI is a debug value with an EntryValue expression, lower it using the
6010 /// corresponding physical register of the associated Argument value
6011 /// (guaranteed to exist by the verifier).
6012 bool SelectionDAGBuilder::visitEntryValueDbgValue(const DbgValueInst &DI) {
6013   DILocalVariable *Variable = DI.getVariable();
6014   DIExpression *Expr = DI.getExpression();
6015   if (!Expr->isEntryValue() || !hasSingleElement(DI.getValues()))
6016     return false;
6017 
6018   // These properties are guaranteed by the verifier.
6019   Argument *Arg = cast<Argument>(DI.getValue(0));
6020   assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
6021 
6022   auto ArgIt = FuncInfo.ValueMap.find(Arg);
6023   if (ArgIt == FuncInfo.ValueMap.end()) {
6024     LLVM_DEBUG(
6025         dbgs() << "Dropping dbg.value: expression is entry_value but "
6026                   "couldn't find an associated register for the Argument\n");
6027     return true;
6028   }
6029   Register ArgVReg = ArgIt->getSecond();
6030 
6031   for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
6032     if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6033       SDDbgValue *SDV =
6034           DAG.getVRegDbgValue(Variable, Expr, PhysReg, false /*IsIndirect*/,
6035                               DI.getDebugLoc(), SDNodeOrder);
6036       DAG.AddDbgValue(SDV, false /*treat as dbg.declare byval parameter*/);
6037       return true;
6038     }
6039   LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
6040                        "couldn't find a physical register\n");
6041   return true;
6042 }
6043 
6044 /// Lower the call to the specified intrinsic function.
6045 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
6046                                              unsigned Intrinsic) {
6047   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6048   SDLoc sdl = getCurSDLoc();
6049   DebugLoc dl = getCurDebugLoc();
6050   SDValue Res;
6051 
6052   SDNodeFlags Flags;
6053   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
6054     Flags.copyFMF(*FPOp);
6055 
6056   switch (Intrinsic) {
6057   default:
6058     // By default, turn this into a target intrinsic node.
6059     visitTargetIntrinsic(I, Intrinsic);
6060     return;
6061   case Intrinsic::vscale: {
6062     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6063     setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
6064     return;
6065   }
6066   case Intrinsic::vastart:  visitVAStart(I); return;
6067   case Intrinsic::vaend:    visitVAEnd(I); return;
6068   case Intrinsic::vacopy:   visitVACopy(I); return;
6069   case Intrinsic::returnaddress:
6070     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
6071                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6072                              getValue(I.getArgOperand(0))));
6073     return;
6074   case Intrinsic::addressofreturnaddress:
6075     setValue(&I,
6076              DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
6077                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6078     return;
6079   case Intrinsic::sponentry:
6080     setValue(&I,
6081              DAG.getNode(ISD::SPONENTRY, sdl,
6082                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6083     return;
6084   case Intrinsic::frameaddress:
6085     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
6086                              TLI.getFrameIndexTy(DAG.getDataLayout()),
6087                              getValue(I.getArgOperand(0))));
6088     return;
6089   case Intrinsic::read_volatile_register:
6090   case Intrinsic::read_register: {
6091     Value *Reg = I.getArgOperand(0);
6092     SDValue Chain = getRoot();
6093     SDValue RegName =
6094         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6095     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6096     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
6097       DAG.getVTList(VT, MVT::Other), Chain, RegName);
6098     setValue(&I, Res);
6099     DAG.setRoot(Res.getValue(1));
6100     return;
6101   }
6102   case Intrinsic::write_register: {
6103     Value *Reg = I.getArgOperand(0);
6104     Value *RegValue = I.getArgOperand(1);
6105     SDValue Chain = getRoot();
6106     SDValue RegName =
6107         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6108     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
6109                             RegName, getValue(RegValue)));
6110     return;
6111   }
6112   case Intrinsic::memcpy: {
6113     const auto &MCI = cast<MemCpyInst>(I);
6114     SDValue Op1 = getValue(I.getArgOperand(0));
6115     SDValue Op2 = getValue(I.getArgOperand(1));
6116     SDValue Op3 = getValue(I.getArgOperand(2));
6117     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
6118     Align DstAlign = MCI.getDestAlign().valueOrOne();
6119     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6120     Align Alignment = std::min(DstAlign, SrcAlign);
6121     bool isVol = MCI.isVolatile();
6122     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6123     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6124     // node.
6125     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6126     SDValue MC = DAG.getMemcpy(
6127         Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6128         /* AlwaysInline */ false, isTC, MachinePointerInfo(I.getArgOperand(0)),
6129         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6130     updateDAGForMaybeTailCall(MC);
6131     return;
6132   }
6133   case Intrinsic::memcpy_inline: {
6134     const auto &MCI = cast<MemCpyInlineInst>(I);
6135     SDValue Dst = getValue(I.getArgOperand(0));
6136     SDValue Src = getValue(I.getArgOperand(1));
6137     SDValue Size = getValue(I.getArgOperand(2));
6138     assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
6139     // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
6140     Align DstAlign = MCI.getDestAlign().valueOrOne();
6141     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6142     Align Alignment = std::min(DstAlign, SrcAlign);
6143     bool isVol = MCI.isVolatile();
6144     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6145     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6146     // node.
6147     SDValue MC = DAG.getMemcpy(
6148         getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
6149         /* AlwaysInline */ true, isTC, MachinePointerInfo(I.getArgOperand(0)),
6150         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6151     updateDAGForMaybeTailCall(MC);
6152     return;
6153   }
6154   case Intrinsic::memset: {
6155     const auto &MSI = cast<MemSetInst>(I);
6156     SDValue Op1 = getValue(I.getArgOperand(0));
6157     SDValue Op2 = getValue(I.getArgOperand(1));
6158     SDValue Op3 = getValue(I.getArgOperand(2));
6159     // @llvm.memset defines 0 and 1 to both mean no alignment.
6160     Align Alignment = MSI.getDestAlign().valueOrOne();
6161     bool isVol = MSI.isVolatile();
6162     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6163     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6164     SDValue MS = DAG.getMemset(
6165         Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
6166         isTC, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
6167     updateDAGForMaybeTailCall(MS);
6168     return;
6169   }
6170   case Intrinsic::memset_inline: {
6171     const auto &MSII = cast<MemSetInlineInst>(I);
6172     SDValue Dst = getValue(I.getArgOperand(0));
6173     SDValue Value = getValue(I.getArgOperand(1));
6174     SDValue Size = getValue(I.getArgOperand(2));
6175     assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
6176     // @llvm.memset.inline defines 0 and 1 to both mean no alignment.
6177     Align DstAlign = MSII.getDestAlign().valueOrOne();
6178     bool isVol = MSII.isVolatile();
6179     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6180     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6181     SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
6182                                /* AlwaysInline */ true, isTC,
6183                                MachinePointerInfo(I.getArgOperand(0)),
6184                                I.getAAMetadata());
6185     updateDAGForMaybeTailCall(MC);
6186     return;
6187   }
6188   case Intrinsic::memmove: {
6189     const auto &MMI = cast<MemMoveInst>(I);
6190     SDValue Op1 = getValue(I.getArgOperand(0));
6191     SDValue Op2 = getValue(I.getArgOperand(1));
6192     SDValue Op3 = getValue(I.getArgOperand(2));
6193     // @llvm.memmove defines 0 and 1 to both mean no alignment.
6194     Align DstAlign = MMI.getDestAlign().valueOrOne();
6195     Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6196     Align Alignment = std::min(DstAlign, SrcAlign);
6197     bool isVol = MMI.isVolatile();
6198     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6199     // FIXME: Support passing different dest/src alignments to the memmove DAG
6200     // node.
6201     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6202     SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6203                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
6204                                 MachinePointerInfo(I.getArgOperand(1)),
6205                                 I.getAAMetadata(), AA);
6206     updateDAGForMaybeTailCall(MM);
6207     return;
6208   }
6209   case Intrinsic::memcpy_element_unordered_atomic: {
6210     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
6211     SDValue Dst = getValue(MI.getRawDest());
6212     SDValue Src = getValue(MI.getRawSource());
6213     SDValue Length = getValue(MI.getLength());
6214 
6215     Type *LengthTy = MI.getLength()->getType();
6216     unsigned ElemSz = MI.getElementSizeInBytes();
6217     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6218     SDValue MC =
6219         DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6220                             isTC, MachinePointerInfo(MI.getRawDest()),
6221                             MachinePointerInfo(MI.getRawSource()));
6222     updateDAGForMaybeTailCall(MC);
6223     return;
6224   }
6225   case Intrinsic::memmove_element_unordered_atomic: {
6226     auto &MI = cast<AtomicMemMoveInst>(I);
6227     SDValue Dst = getValue(MI.getRawDest());
6228     SDValue Src = getValue(MI.getRawSource());
6229     SDValue Length = getValue(MI.getLength());
6230 
6231     Type *LengthTy = MI.getLength()->getType();
6232     unsigned ElemSz = MI.getElementSizeInBytes();
6233     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6234     SDValue MC =
6235         DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6236                              isTC, MachinePointerInfo(MI.getRawDest()),
6237                              MachinePointerInfo(MI.getRawSource()));
6238     updateDAGForMaybeTailCall(MC);
6239     return;
6240   }
6241   case Intrinsic::memset_element_unordered_atomic: {
6242     auto &MI = cast<AtomicMemSetInst>(I);
6243     SDValue Dst = getValue(MI.getRawDest());
6244     SDValue Val = getValue(MI.getValue());
6245     SDValue Length = getValue(MI.getLength());
6246 
6247     Type *LengthTy = MI.getLength()->getType();
6248     unsigned ElemSz = MI.getElementSizeInBytes();
6249     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6250     SDValue MC =
6251         DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
6252                             isTC, MachinePointerInfo(MI.getRawDest()));
6253     updateDAGForMaybeTailCall(MC);
6254     return;
6255   }
6256   case Intrinsic::call_preallocated_setup: {
6257     const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6258     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6259     SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6260                               getRoot(), SrcValue);
6261     setValue(&I, Res);
6262     DAG.setRoot(Res);
6263     return;
6264   }
6265   case Intrinsic::call_preallocated_arg: {
6266     const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6267     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6268     SDValue Ops[3];
6269     Ops[0] = getRoot();
6270     Ops[1] = SrcValue;
6271     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6272                                    MVT::i32); // arg index
6273     SDValue Res = DAG.getNode(
6274         ISD::PREALLOCATED_ARG, sdl,
6275         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6276     setValue(&I, Res);
6277     DAG.setRoot(Res.getValue(1));
6278     return;
6279   }
6280   case Intrinsic::dbg_declare: {
6281     const auto &DI = cast<DbgDeclareInst>(I);
6282     // Debug intrinsics are handled separately in assignment tracking mode.
6283     // Some intrinsics are handled right after Argument lowering.
6284     if (AssignmentTrackingEnabled ||
6285         FuncInfo.PreprocessedDbgDeclares.count(&DI))
6286       return;
6287     LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DI << "\n");
6288     DILocalVariable *Variable = DI.getVariable();
6289     DIExpression *Expression = DI.getExpression();
6290     dropDanglingDebugInfo(Variable, Expression);
6291     // Assume dbg.declare cannot currently use DIArgList, i.e.
6292     // it is non-variadic.
6293     assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
6294     handleDebugDeclare(DI.getVariableLocationOp(0), Variable, Expression,
6295                        DI.getDebugLoc());
6296     return;
6297   }
6298   case Intrinsic::dbg_label: {
6299     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
6300     DILabel *Label = DI.getLabel();
6301     assert(Label && "Missing label");
6302 
6303     SDDbgLabel *SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
6305     DAG.AddDbgLabel(SDV);
6306     return;
6307   }
6308   case Intrinsic::dbg_assign: {
6309     // Debug intrinsics are handled separately in assignment tracking mode.
6310     if (AssignmentTrackingEnabled)
6311       return;
6312     // If assignment tracking hasn't been enabled then fall through and treat
6313     // the dbg.assign as a dbg.value.
6314     [[fallthrough]];
6315   }
6316   case Intrinsic::dbg_value: {
6317     // Debug intrinsics are handled separately in assignment tracking mode.
6318     if (AssignmentTrackingEnabled)
6319       return;
6320     const DbgValueInst &DI = cast<DbgValueInst>(I);
6321     assert(DI.getVariable() && "Missing variable");
6322 
6323     DILocalVariable *Variable = DI.getVariable();
6324     DIExpression *Expression = DI.getExpression();
6325     dropDanglingDebugInfo(Variable, Expression);
6326 
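         // Try the special lowerings in decreasing order of specificity: entry
         // values first, then explicit kill (undef) locations, then an ordinary
         // location. If the operands are not available yet, record the debug
         // value as dangling so it can be resolved later.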
6327     if (visitEntryValueDbgValue(DI))
6328       return;
6329 
6330     if (DI.isKillLocation()) {
6331       handleKillDebugValue(Variable, Expression, DI.getDebugLoc(), SDNodeOrder);
6332       return;
6333     }
6334 
6335     SmallVector<Value *, 4> Values(DI.getValues());
6336     if (Values.empty())
6337       return;
6338 
6339     bool IsVariadic = DI.hasArgList();
6340     if (!handleDebugValue(Values, Variable, Expression, DI.getDebugLoc(),
6341                           SDNodeOrder, IsVariadic))
6342       addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
6343                            DI.getDebugLoc(), SDNodeOrder);
6344     return;
6345   }
6346 
6347   case Intrinsic::eh_typeid_for: {
6348     // Find the type id for the given typeinfo.
6349     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6350     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6351     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6352     setValue(&I, Res);
6353     return;
6354   }
6355 
6356   case Intrinsic::eh_return_i32:
6357   case Intrinsic::eh_return_i64:
6358     DAG.getMachineFunction().setCallsEHReturn(true);
6359     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6360                             MVT::Other,
6361                             getControlRoot(),
6362                             getValue(I.getArgOperand(0)),
6363                             getValue(I.getArgOperand(1))));
6364     return;
6365   case Intrinsic::eh_unwind_init:
6366     DAG.getMachineFunction().setCallsUnwindInit(true);
6367     return;
6368   case Intrinsic::eh_dwarf_cfa:
6369     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6370                              TLI.getPointerTy(DAG.getDataLayout()),
6371                              getValue(I.getArgOperand(0))));
6372     return;
6373   case Intrinsic::eh_sjlj_callsite: {
6374     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6375     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
6376     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
6377 
6378     MMI.setCurrentCallSite(CI->getZExtValue());
6379     return;
6380   }
6381   case Intrinsic::eh_sjlj_functioncontext: {
6382     // Get and store the index of the function context.
6383     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6384     AllocaInst *FnCtx =
6385       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6386     int FI = FuncInfo.StaticAllocaMap[FnCtx];
6387     MFI.setFunctionContextIndex(FI);
6388     return;
6389   }
6390   case Intrinsic::eh_sjlj_setjmp: {
6391     SDValue Ops[2];
6392     Ops[0] = getRoot();
6393     Ops[1] = getValue(I.getArgOperand(0));
6394     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6395                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
6396     setValue(&I, Op.getValue(0));
6397     DAG.setRoot(Op.getValue(1));
6398     return;
6399   }
6400   case Intrinsic::eh_sjlj_longjmp:
6401     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6402                             getRoot(), getValue(I.getArgOperand(0))));
6403     return;
6404   case Intrinsic::eh_sjlj_setup_dispatch:
6405     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6406                             getRoot()));
6407     return;
6408   case Intrinsic::masked_gather:
6409     visitMaskedGather(I);
6410     return;
6411   case Intrinsic::masked_load:
6412     visitMaskedLoad(I);
6413     return;
6414   case Intrinsic::masked_scatter:
6415     visitMaskedScatter(I);
6416     return;
6417   case Intrinsic::masked_store:
6418     visitMaskedStore(I);
6419     return;
6420   case Intrinsic::masked_expandload:
6421     visitMaskedLoad(I, true /* IsExpanding */);
6422     return;
6423   case Intrinsic::masked_compressstore:
6424     visitMaskedStore(I, true /* IsCompressing */);
6425     return;
6426   case Intrinsic::powi:
6427     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6428                             getValue(I.getArgOperand(1)), DAG));
6429     return;
6430   case Intrinsic::log:
6431     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6432     return;
6433   case Intrinsic::log2:
6434     setValue(&I,
6435              expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6436     return;
6437   case Intrinsic::log10:
6438     setValue(&I,
6439              expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6440     return;
6441   case Intrinsic::exp:
6442     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6443     return;
6444   case Intrinsic::exp2:
6445     setValue(&I,
6446              expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6447     return;
6448   case Intrinsic::pow:
6449     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6450                            getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6451     return;
6452   case Intrinsic::sqrt:
6453   case Intrinsic::fabs:
6454   case Intrinsic::sin:
6455   case Intrinsic::cos:
6456   case Intrinsic::exp10:
6457   case Intrinsic::floor:
6458   case Intrinsic::ceil:
6459   case Intrinsic::trunc:
6460   case Intrinsic::rint:
6461   case Intrinsic::nearbyint:
6462   case Intrinsic::round:
6463   case Intrinsic::roundeven:
6464   case Intrinsic::canonicalize: {
6465     unsigned Opcode;
6466     switch (Intrinsic) {
6467     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6468     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
6469     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
6470     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
6471     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
6472     case Intrinsic::exp10:     Opcode = ISD::FEXP10;     break;
6473     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
6474     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
6475     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
6476     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
6477     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6478     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
6479     case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6480     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6481     }
6482 
6483     setValue(&I, DAG.getNode(Opcode, sdl,
6484                              getValue(I.getArgOperand(0)).getValueType(),
6485                              getValue(I.getArgOperand(0)), Flags));
6486     return;
6487   }
6488   case Intrinsic::lround:
6489   case Intrinsic::llround:
6490   case Intrinsic::lrint:
6491   case Intrinsic::llrint: {
6492     unsigned Opcode;
6493     switch (Intrinsic) {
6494     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6495     case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
6496     case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6497     case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
6498     case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
6499     }
6500 
6501     EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6502     setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6503                              getValue(I.getArgOperand(0))));
6504     return;
6505   }
6506   case Intrinsic::minnum:
6507     setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6508                              getValue(I.getArgOperand(0)).getValueType(),
6509                              getValue(I.getArgOperand(0)),
6510                              getValue(I.getArgOperand(1)), Flags));
6511     return;
6512   case Intrinsic::maxnum:
6513     setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6514                              getValue(I.getArgOperand(0)).getValueType(),
6515                              getValue(I.getArgOperand(0)),
6516                              getValue(I.getArgOperand(1)), Flags));
6517     return;
6518   case Intrinsic::minimum:
6519     setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6520                              getValue(I.getArgOperand(0)).getValueType(),
6521                              getValue(I.getArgOperand(0)),
6522                              getValue(I.getArgOperand(1)), Flags));
6523     return;
6524   case Intrinsic::maximum:
6525     setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6526                              getValue(I.getArgOperand(0)).getValueType(),
6527                              getValue(I.getArgOperand(0)),
6528                              getValue(I.getArgOperand(1)), Flags));
6529     return;
6530   case Intrinsic::copysign:
6531     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6532                              getValue(I.getArgOperand(0)).getValueType(),
6533                              getValue(I.getArgOperand(0)),
6534                              getValue(I.getArgOperand(1)), Flags));
6535     return;
6536   case Intrinsic::ldexp:
6537     setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
6538                              getValue(I.getArgOperand(0)).getValueType(),
6539                              getValue(I.getArgOperand(0)),
6540                              getValue(I.getArgOperand(1)), Flags));
6541     return;
6542   case Intrinsic::frexp: {
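         // frexp returns the aggregate {fraction, int exponent}; ComputeValueVTs
         // splits that struct type into the two result VTs of the FFREXP node.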
6543     SmallVector<EVT, 2> ValueVTs;
6544     ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
6545     SDVTList VTs = DAG.getVTList(ValueVTs);
6546     setValue(&I,
6547              DAG.getNode(ISD::FFREXP, sdl, VTs, getValue(I.getArgOperand(0))));
6548     return;
6549   }
6550   case Intrinsic::arithmetic_fence: {
6551     setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
6552                              getValue(I.getArgOperand(0)).getValueType(),
6553                              getValue(I.getArgOperand(0)), Flags));
6554     return;
6555   }
6556   case Intrinsic::fma:
6557     setValue(&I, DAG.getNode(
6558                      ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
6559                      getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
6560                      getValue(I.getArgOperand(2)), Flags));
6561     return;
6562 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
6563   case Intrinsic::INTRINSIC:
6564 #include "llvm/IR/ConstrainedOps.def"
6565     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6566     return;
6567 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6568 #include "llvm/IR/VPIntrinsics.def"
6569     visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
6570     return;
6571   case Intrinsic::fptrunc_round: {
6572     // Get the last argument of the call, the metadata, and convert it to an
6573     // integer.
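         // e.g. %h = call half @llvm.fptrunc.round(float %x,
         //                                         metadata !"round.towardzero")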
6574     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
6575     std::optional<RoundingMode> RoundMode =
6576         convertStrToRoundingMode(cast<MDString>(MD)->getString());
6577 
6578     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6579 
6580     // Propagate fast-math-flags from IR to node(s).
6581     SDNodeFlags Flags;
6582     Flags.copyFMF(*cast<FPMathOperator>(&I));
6583     SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
6584 
6585     SDValue Result = DAG.getNode(
6586         ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
6587         DAG.getTargetConstant((int)*RoundMode, sdl,
6588                               TLI.getPointerTy(DAG.getDataLayout())));
6590     setValue(&I, Result);
6591 
6592     return;
6593   }
6594   case Intrinsic::fmuladd: {
6595     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
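         // Lower to a single fused FMA when fusion is allowed and profitable;
         // otherwise emit the unfused form, i.e. fmuladd(a, b, c) becomes
         // fadd(fmul(a, b), c) with an intermediate rounding step.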
6596     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6597         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6598       setValue(&I, DAG.getNode(ISD::FMA, sdl,
6599                                getValue(I.getArgOperand(0)).getValueType(),
6600                                getValue(I.getArgOperand(0)),
6601                                getValue(I.getArgOperand(1)),
6602                                getValue(I.getArgOperand(2)), Flags));
6603     } else {
6604       // TODO: Intrinsic calls should have fast-math-flags.
6605       SDValue Mul = DAG.getNode(
6606           ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
6607           getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
6608       SDValue Add = DAG.getNode(ISD::FADD, sdl,
6609                                 getValue(I.getArgOperand(0)).getValueType(),
6610                                 Mul, getValue(I.getArgOperand(2)), Flags);
6611       setValue(&I, Add);
6612     }
6613     return;
6614   }
6615   case Intrinsic::convert_to_fp16:
6616     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6617                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6618                                          getValue(I.getArgOperand(0)),
6619                                          DAG.getTargetConstant(0, sdl,
6620                                                                MVT::i32))));
6621     return;
6622   case Intrinsic::convert_from_fp16:
6623     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6624                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6625                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
6626                                          getValue(I.getArgOperand(0)))));
6627     return;
6628   case Intrinsic::fptosi_sat: {
6629     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6630     setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
6631                              getValue(I.getArgOperand(0)),
6632                              DAG.getValueType(VT.getScalarType())));
6633     return;
6634   }
6635   case Intrinsic::fptoui_sat: {
6636     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6637     setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
6638                              getValue(I.getArgOperand(0)),
6639                              DAG.getValueType(VT.getScalarType())));
6640     return;
6641   }
6642   case Intrinsic::set_rounding:
6643     Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
6644                       {getRoot(), getValue(I.getArgOperand(0))});
6645     setValue(&I, Res);
6646     DAG.setRoot(Res.getValue(0));
6647     return;
6648   case Intrinsic::is_fpclass: {
6649     const DataLayout DLayout = DAG.getDataLayout();
6650     EVT DestVT = TLI.getValueType(DLayout, I.getType());
6651     EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
6652     FPClassTest Test = static_cast<FPClassTest>(
6653         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
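         // The second operand is an FPClassTest bit mask, e.g. a mask of 3
         // (fcSNan | fcQNan) tests for any NaN.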
6654     MachineFunction &MF = DAG.getMachineFunction();
6655     const Function &F = MF.getFunction();
6656     SDValue Op = getValue(I.getArgOperand(0));
6657     SDNodeFlags Flags;
6658     Flags.setNoFPExcept(
6659         !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
6660     // If ISD::IS_FPCLASS should be expanded, do it right now, because the
6661     // expansion can use illegal types. Expanding early allows these types to
6662     // be legalized prior to selection.
6663     if (!TLI.isOperationLegalOrCustom(ISD::IS_FPCLASS, ArgVT)) {
6664       SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
6665       setValue(&I, Result);
6666       return;
6667     }
6668 
6669     SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
6670     SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
6671     setValue(&I, V);
6672     return;
6673   }
6674   case Intrinsic::get_fpenv: {
6675     const DataLayout DLayout = DAG.getDataLayout();
6676     EVT EnvVT = TLI.getValueType(DLayout, I.getType());
6677     Align TempAlign = DAG.getEVTAlign(EnvVT);
6678     SDValue Chain = getRoot();
6679     // Use GET_FPENV if it is legal or custom. Otherwise, use a memory-based
6680     // node with temporary storage on the stack.
6681     if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
6682       Res = DAG.getNode(
6683           ISD::GET_FPENV, sdl,
6684           DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6685                         MVT::Other),
6686           Chain);
6687     } else {
6688       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6689       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6690       auto MPI =
6691           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6692       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6693           MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize,
6694           TempAlign);
6695       Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6696       Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
6697     }
6698     setValue(&I, Res);
6699     DAG.setRoot(Res.getValue(1));
6700     return;
6701   }
6702   case Intrinsic::set_fpenv: {
6703     const DataLayout DLayout = DAG.getDataLayout();
6704     SDValue Env = getValue(I.getArgOperand(0));
6705     EVT EnvVT = Env.getValueType();
6706     Align TempAlign = DAG.getEVTAlign(EnvVT);
6707     SDValue Chain = getRoot();
6708     // If SET_FPENV is legal or custom, use it. Otherwise, spill the
6709     // environment to a stack temporary and use SET_FPENV_MEM.
6710     if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
6711       Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
6712     } else {
6713       // Allocate stack space, copy the environment bits into it, and use this
6714       // memory in SET_FPENV_MEM.
6715       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6716       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6717       auto MPI =
6718           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6719       Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
6720                            MachineMemOperand::MOStore);
6721       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6722           MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize,
6723           TempAlign);
6724       Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6725     }
6726     DAG.setRoot(Chain);
6727     return;
6728   }
6729   case Intrinsic::reset_fpenv:
6730     DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
6731     return;
6732   case Intrinsic::get_fpmode:
6733     Res = DAG.getNode(
6734         ISD::GET_FPMODE, sdl,
6735         DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6736                       MVT::Other),
6737         DAG.getRoot());
6738     setValue(&I, Res);
6739     DAG.setRoot(Res.getValue(1));
6740     return;
6741   case Intrinsic::set_fpmode:
6742     Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
6743                       getValue(I.getArgOperand(0)));
6744     DAG.setRoot(Res);
6745     return;
6746   case Intrinsic::reset_fpmode: {
6747     Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
6748     DAG.setRoot(Res);
6749     return;
6750   }
6751   case Intrinsic::pcmarker: {
6752     SDValue Tmp = getValue(I.getArgOperand(0));
6753     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
6754     return;
6755   }
6756   case Intrinsic::readcyclecounter: {
6757     SDValue Op = getRoot();
6758     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6759                       DAG.getVTList(MVT::i64, MVT::Other), Op);
6760     setValue(&I, Res);
6761     DAG.setRoot(Res.getValue(1));
6762     return;
6763   }
6764   case Intrinsic::bitreverse:
6765     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6766                              getValue(I.getArgOperand(0)).getValueType(),
6767                              getValue(I.getArgOperand(0))));
6768     return;
6769   case Intrinsic::bswap:
6770     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6771                              getValue(I.getArgOperand(0)).getValueType(),
6772                              getValue(I.getArgOperand(0))));
6773     return;
6774   case Intrinsic::cttz: {
6775     SDValue Arg = getValue(I.getArgOperand(0));
6776     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6777     EVT Ty = Arg.getValueType();
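         // The i1 second operand states whether a zero input is poison; if so,
         // the ZERO_UNDEF variant is used, which targets may lower more cheaply.
         // The ctlz case below is analogous.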
6778     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6779                              sdl, Ty, Arg));
6780     return;
6781   }
6782   case Intrinsic::ctlz: {
6783     SDValue Arg = getValue(I.getArgOperand(0));
6784     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6785     EVT Ty = Arg.getValueType();
6786     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6787                              sdl, Ty, Arg));
6788     return;
6789   }
6790   case Intrinsic::ctpop: {
6791     SDValue Arg = getValue(I.getArgOperand(0));
6792     EVT Ty = Arg.getValueType();
6793     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6794     return;
6795   }
6796   case Intrinsic::fshl:
6797   case Intrinsic::fshr: {
6798     bool IsFSHL = Intrinsic == Intrinsic::fshl;
6799     SDValue X = getValue(I.getArgOperand(0));
6800     SDValue Y = getValue(I.getArgOperand(1));
6801     SDValue Z = getValue(I.getArgOperand(2));
6802     EVT VT = X.getValueType();
6803 
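         // fshl(X, Y, Z) extracts a word from the concatenation X:Y shifted
         // left by Z, so with both inputs equal it degenerates to a rotate,
         // e.g. fshl(x, x, 3) on i8 is rotl(x, 3).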
6804     if (X == Y) {
6805       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6806       setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6807     } else {
6808       auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6809       setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6810     }
6811     return;
6812   }
6813   case Intrinsic::sadd_sat: {
6814     SDValue Op1 = getValue(I.getArgOperand(0));
6815     SDValue Op2 = getValue(I.getArgOperand(1));
6816     setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6817     return;
6818   }
6819   case Intrinsic::uadd_sat: {
6820     SDValue Op1 = getValue(I.getArgOperand(0));
6821     SDValue Op2 = getValue(I.getArgOperand(1));
6822     setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6823     return;
6824   }
6825   case Intrinsic::ssub_sat: {
6826     SDValue Op1 = getValue(I.getArgOperand(0));
6827     SDValue Op2 = getValue(I.getArgOperand(1));
6828     setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6829     return;
6830   }
6831   case Intrinsic::usub_sat: {
6832     SDValue Op1 = getValue(I.getArgOperand(0));
6833     SDValue Op2 = getValue(I.getArgOperand(1));
6834     setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6835     return;
6836   }
6837   case Intrinsic::sshl_sat: {
6838     SDValue Op1 = getValue(I.getArgOperand(0));
6839     SDValue Op2 = getValue(I.getArgOperand(1));
6840     setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6841     return;
6842   }
6843   case Intrinsic::ushl_sat: {
6844     SDValue Op1 = getValue(I.getArgOperand(0));
6845     SDValue Op2 = getValue(I.getArgOperand(1));
6846     setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6847     return;
6848   }
6849   case Intrinsic::smul_fix:
6850   case Intrinsic::umul_fix:
6851   case Intrinsic::smul_fix_sat:
6852   case Intrinsic::umul_fix_sat: {
6853     SDValue Op1 = getValue(I.getArgOperand(0));
6854     SDValue Op2 = getValue(I.getArgOperand(1));
6855     SDValue Op3 = getValue(I.getArgOperand(2));
6856     setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6857                              Op1.getValueType(), Op1, Op2, Op3));
6858     return;
6859   }
6860   case Intrinsic::sdiv_fix:
6861   case Intrinsic::udiv_fix:
6862   case Intrinsic::sdiv_fix_sat:
6863   case Intrinsic::udiv_fix_sat: {
6864     SDValue Op1 = getValue(I.getArgOperand(0));
6865     SDValue Op2 = getValue(I.getArgOperand(1));
6866     SDValue Op3 = getValue(I.getArgOperand(2));
6867     setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6868                               Op1, Op2, Op3, DAG, TLI));
6869     return;
6870   }
6871   case Intrinsic::smax: {
6872     SDValue Op1 = getValue(I.getArgOperand(0));
6873     SDValue Op2 = getValue(I.getArgOperand(1));
6874     setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
6875     return;
6876   }
6877   case Intrinsic::smin: {
6878     SDValue Op1 = getValue(I.getArgOperand(0));
6879     SDValue Op2 = getValue(I.getArgOperand(1));
6880     setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
6881     return;
6882   }
6883   case Intrinsic::umax: {
6884     SDValue Op1 = getValue(I.getArgOperand(0));
6885     SDValue Op2 = getValue(I.getArgOperand(1));
6886     setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
6887     return;
6888   }
6889   case Intrinsic::umin: {
6890     SDValue Op1 = getValue(I.getArgOperand(0));
6891     SDValue Op2 = getValue(I.getArgOperand(1));
6892     setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
6893     return;
6894   }
6895   case Intrinsic::abs: {
6896     // TODO: Preserve "int min is poison" arg in SDAG?
6897     SDValue Op1 = getValue(I.getArgOperand(0));
6898     setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
6899     return;
6900   }
6901   case Intrinsic::stacksave: {
6902     SDValue Op = getRoot();
6903     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6904     Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
6905     setValue(&I, Res);
6906     DAG.setRoot(Res.getValue(1));
6907     return;
6908   }
6909   case Intrinsic::stackrestore:
6910     Res = getValue(I.getArgOperand(0));
6911     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6912     return;
6913   case Intrinsic::get_dynamic_area_offset: {
6914     SDValue Op = getRoot();
6915     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6916     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6917     // The result type of @llvm.get.dynamic.area.offset must not be wider
6918     // than the target's frame-index type (PtrTy).
6919     if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
6920       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6921                          " intrinsic!");
6922     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6923                       Op);
6924     DAG.setRoot(Op);
6925     setValue(&I, Res);
6926     return;
6927   }
6928   case Intrinsic::stackguard: {
6929     MachineFunction &MF = DAG.getMachineFunction();
6930     const Module &M = *MF.getFunction().getParent();
6931     SDValue Chain = getRoot();
6932     if (TLI.useLoadStackGuardNode()) {
6933       Res = getLoadStackGuard(DAG, sdl, Chain);
6934     } else {
6935       EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6936       const Value *Global = TLI.getSDagStackGuard(M);
6937       Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
6938       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6939                         MachinePointerInfo(Global, 0), Align,
6940                         MachineMemOperand::MOVolatile);
6941     }
6942     if (TLI.useStackGuardXorFP())
6943       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6944     DAG.setRoot(Chain);
6945     setValue(&I, Res);
6946     return;
6947   }
6948   case Intrinsic::stackprotector: {
6949     // Emit code into the DAG to store the stack guard onto the stack.
6950     MachineFunction &MF = DAG.getMachineFunction();
6951     MachineFrameInfo &MFI = MF.getFrameInfo();
6952     SDValue Src, Chain = getRoot();
6953 
6954     if (TLI.useLoadStackGuardNode())
6955       Src = getLoadStackGuard(DAG, sdl, Chain);
6956     else
6957       Src = getValue(I.getArgOperand(0));   // The guard's value.
6958 
6959     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
6960 
6961     int FI = FuncInfo.StaticAllocaMap[Slot];
6962     MFI.setStackProtectorIndex(FI);
6963     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6964 
6965     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
6966 
6967     // Store the stack protector onto the stack.
6968     Res = DAG.getStore(
6969         Chain, sdl, Src, FIN,
6970         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
6971         MaybeAlign(), MachineMemOperand::MOVolatile);
6972     setValue(&I, Res);
6973     DAG.setRoot(Res);
6974     return;
6975   }
6976   case Intrinsic::objectsize:
6977     llvm_unreachable("llvm.objectsize.* should have been lowered already");
6978 
6979   case Intrinsic::is_constant:
6980     llvm_unreachable("llvm.is.constant.* should have been lowered already");
6981 
6982   case Intrinsic::annotation:
6983   case Intrinsic::ptr_annotation:
6984   case Intrinsic::launder_invariant_group:
6985   case Intrinsic::strip_invariant_group:
6986     // Drop the intrinsic, but forward the value
6987     setValue(&I, getValue(I.getOperand(0)));
6988     return;
6989 
6990   case Intrinsic::assume:
6991   case Intrinsic::experimental_noalias_scope_decl:
6992   case Intrinsic::var_annotation:
6993   case Intrinsic::sideeffect:
6994     // Discard annotate attributes, noalias scope declarations, assumptions, and
6995     // artificial side-effects.
6996     return;
6997 
6998   case Intrinsic::codeview_annotation: {
6999     // Emit a label associated with this metadata.
7000     MachineFunction &MF = DAG.getMachineFunction();
7001     MCSymbol *Label =
7002         MF.getMMI().getContext().createTempSymbol("annotation", true);
7003     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
7004     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
7005     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
7006     DAG.setRoot(Res);
7007     return;
7008   }
7009 
7010   case Intrinsic::init_trampoline: {
7011     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
7012 
7013     SDValue Ops[6];
7014     Ops[0] = getRoot();
7015     Ops[1] = getValue(I.getArgOperand(0));
7016     Ops[2] = getValue(I.getArgOperand(1));
7017     Ops[3] = getValue(I.getArgOperand(2));
7018     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
7019     Ops[5] = DAG.getSrcValue(F);
7020 
7021     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
7022 
7023     DAG.setRoot(Res);
7024     return;
7025   }
7026   case Intrinsic::adjust_trampoline:
7027     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
7028                              TLI.getPointerTy(DAG.getDataLayout()),
7029                              getValue(I.getArgOperand(0))));
7030     return;
7031   case Intrinsic::gcroot: {
7032     assert(DAG.getMachineFunction().getFunction().hasGC() &&
7033            "only valid in functions with gc specified, enforced by Verifier");
7034     assert(GFI && "implied by previous");
7035     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
7036     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
7037 
7038     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
7039     GFI->addStackRoot(FI->getIndex(), TypeMap);
7040     return;
7041   }
7042   case Intrinsic::gcread:
7043   case Intrinsic::gcwrite:
7044     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
7045   case Intrinsic::get_rounding:
7046     Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
7047     setValue(&I, Res);
7048     DAG.setRoot(Res.getValue(1));
7049     return;
7050 
7051   case Intrinsic::expect:
7052     // Just replace __builtin_expect(exp, c) with EXP.
7053     setValue(&I, getValue(I.getArgOperand(0)));
7054     return;
7055 
7056   case Intrinsic::ubsantrap:
7057   case Intrinsic::debugtrap:
7058   case Intrinsic::trap: {
7059     StringRef TrapFuncName =
7060         I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
7061     if (TrapFuncName.empty()) {
7062       switch (Intrinsic) {
7063       case Intrinsic::trap:
7064         DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
7065         break;
7066       case Intrinsic::debugtrap:
7067         DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
7068         break;
7069       case Intrinsic::ubsantrap:
7070         DAG.setRoot(DAG.getNode(
7071             ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
7072             DAG.getTargetConstant(
7073                 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7074                 MVT::i32)));
7075         break;
7076       default: llvm_unreachable("unknown trap intrinsic");
7077       }
7078       return;
7079     }
7080     TargetLowering::ArgListTy Args;
7081     if (Intrinsic == Intrinsic::ubsantrap) {
7082       Args.push_back(TargetLoweringBase::ArgListEntry());
7083       Args[0].Val = I.getArgOperand(0);
7084       Args[0].Node = getValue(Args[0].Val);
7085       Args[0].Ty = Args[0].Val->getType();
7086     }
7087 
7088     TargetLowering::CallLoweringInfo CLI(DAG);
7089     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
7090         CallingConv::C, I.getType(),
7091         DAG.getExternalSymbol(TrapFuncName.data(),
7092                               TLI.getPointerTy(DAG.getDataLayout())),
7093         std::move(Args));
7094 
7095     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7096     DAG.setRoot(Result.second);
7097     return;
7098   }
7099 
7100   case Intrinsic::uadd_with_overflow:
7101   case Intrinsic::sadd_with_overflow:
7102   case Intrinsic::usub_with_overflow:
7103   case Intrinsic::ssub_with_overflow:
7104   case Intrinsic::umul_with_overflow:
7105   case Intrinsic::smul_with_overflow: {
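         // Each *.with.overflow intrinsic returns {result, i1 overflow}, e.g.
         //   %r = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
         // maps onto one two-result node; for vectors the overflow flag becomes
         // a vector of i1 with a matching element count.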
7106     ISD::NodeType Op;
7107     switch (Intrinsic) {
7108     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7109     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
7110     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
7111     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
7112     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
7113     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
7114     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
7115     }
7116     SDValue Op1 = getValue(I.getArgOperand(0));
7117     SDValue Op2 = getValue(I.getArgOperand(1));
7118 
7119     EVT ResultVT = Op1.getValueType();
7120     EVT OverflowVT = MVT::i1;
7121     if (ResultVT.isVector())
7122       OverflowVT = EVT::getVectorVT(
7123           *Context, OverflowVT, ResultVT.getVectorElementCount());
7124 
7125     SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
7126     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
7127     return;
7128   }
7129   case Intrinsic::prefetch: {
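         // @llvm.prefetch(ptr, rw, locality, cache type): rw is 0 for read and
         // 1 for write, locality ranges from 0 (no locality) to 3 (extremely
         // local), and the cache type is 0 for instruction and 1 for data.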
7130     SDValue Ops[5];
7131     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7132     auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
7133     Ops[0] = DAG.getRoot();
7134     Ops[1] = getValue(I.getArgOperand(0));
7135     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
7136                                    MVT::i32);
7137     Ops[3] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(2)), sdl,
7138                                    MVT::i32);
7139     Ops[4] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(3)), sdl,
7140                                    MVT::i32);
7141     SDValue Result = DAG.getMemIntrinsicNode(
7142         ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
7143         EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
7144         /* align */ std::nullopt, Flags);
7145 
7146     // Chain the prefetch in parallel with any pending loads, to stay out of
7147     // the way of later optimizations.
7148     PendingLoads.push_back(Result);
7149     Result = getRoot();
7150     DAG.setRoot(Result);
7151     return;
7152   }
7153   case Intrinsic::lifetime_start:
7154   case Intrinsic::lifetime_end: {
7155     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
7156     // Stack coloring is not enabled in O0, discard region information.
7157     if (TM.getOptLevel() == CodeGenOptLevel::None)
7158       return;
7159 
7160     const int64_t ObjectSize =
7161         cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
7162     Value *const ObjectPtr = I.getArgOperand(1);
7163     SmallVector<const Value *, 4> Allocas;
7164     getUnderlyingObjects(ObjectPtr, Allocas);
7165 
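         // The pointer may resolve to more than one alloca, e.g. through a
         // select or phi; emit a lifetime marker for each static one found.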
7166     for (const Value *Alloca : Allocas) {
7167       const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7168 
7169       // Could not find an Alloca.
7170       if (!LifetimeObject)
7171         continue;
7172 
7173       // First check that the Alloca is static, otherwise it won't have a
7174       // valid frame index.
7175       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
7176       if (SI == FuncInfo.StaticAllocaMap.end())
7177         return;
7178 
7179       const int FrameIndex = SI->second;
7180       int64_t Offset;
7181       if (GetPointerBaseWithConstantOffset(
7182               ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
7183         Offset = -1; // Cannot determine offset from alloca to lifetime object.
7184       Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
7185                                 Offset);
7186       DAG.setRoot(Res);
7187     }
7188     return;
7189   }
7190   case Intrinsic::pseudoprobe: {
7191     auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7192     auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7193     auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7194     Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
7195     DAG.setRoot(Res);
7196     return;
7197   }
7198   case Intrinsic::invariant_start:
7199     // Discard region information.
7200     setValue(&I,
7201              DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType())));
7202     return;
7203   case Intrinsic::invariant_end:
7204     // Discard region information.
7205     return;
7206   case Intrinsic::clear_cache:
7207     // FunctionName may be null.
7208     if (const char *FunctionName = TLI.getClearCacheBuiltinName())
7209       lowerCallToExternalSymbol(I, FunctionName);
7210     return;
7211   case Intrinsic::donothing:
7212   case Intrinsic::seh_try_begin:
7213   case Intrinsic::seh_scope_begin:
7214   case Intrinsic::seh_try_end:
7215   case Intrinsic::seh_scope_end:
7216     // ignore
7217     return;
7218   case Intrinsic::experimental_stackmap:
7219     visitStackmap(I);
7220     return;
7221   case Intrinsic::experimental_patchpoint_void:
7222   case Intrinsic::experimental_patchpoint_i64:
7223     visitPatchpoint(I);
7224     return;
7225   case Intrinsic::experimental_gc_statepoint:
7226     LowerStatepoint(cast<GCStatepointInst>(I));
7227     return;
7228   case Intrinsic::experimental_gc_result:
7229     visitGCResult(cast<GCResultInst>(I));
7230     return;
7231   case Intrinsic::experimental_gc_relocate:
7232     visitGCRelocate(cast<GCRelocateInst>(I));
7233     return;
7234   case Intrinsic::instrprof_cover:
7235     llvm_unreachable("instrprof failed to lower a cover");
7236   case Intrinsic::instrprof_increment:
7237     llvm_unreachable("instrprof failed to lower an increment");
7238   case Intrinsic::instrprof_timestamp:
7239     llvm_unreachable("instrprof failed to lower a timestamp");
7240   case Intrinsic::instrprof_value_profile:
7241     llvm_unreachable("instrprof failed to lower a value profiling call");
7242   case Intrinsic::instrprof_mcdc_parameters:
7243     llvm_unreachable("instrprof failed to lower mcdc parameters");
7244   case Intrinsic::instrprof_mcdc_tvbitmap_update:
7245     llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update");
7246   case Intrinsic::instrprof_mcdc_condbitmap_update:
7247     llvm_unreachable("instrprof failed to lower an mcdc condbitmap update");
7248   case Intrinsic::localescape: {
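         // void @llvm.localescape(...) records the frame offsets of the given
         // static allocas under per-index MCSymbols so that @llvm.localrecover
         // (below) can recompute their addresses from another frame pointer.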
7249     MachineFunction &MF = DAG.getMachineFunction();
7250     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
7251 
7252     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
7253     // is the same on all targets.
7254     for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) {
7255       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7256       if (isa<ConstantPointerNull>(Arg))
7257         continue; // Skip null pointers. They represent a hole in index space.
7258       AllocaInst *Slot = cast<AllocaInst>(Arg);
7259       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
7260              "can only escape static allocas");
7261       int FI = FuncInfo.StaticAllocaMap[Slot];
7262       MCSymbol *FrameAllocSym =
7263           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7264               GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
7265       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
7266               TII->get(TargetOpcode::LOCAL_ESCAPE))
7267           .addSym(FrameAllocSym)
7268           .addFrameIndex(FI);
7269     }
7270 
7271     return;
7272   }
7273 
7274   case Intrinsic::localrecover: {
7275     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
7276     MachineFunction &MF = DAG.getMachineFunction();
7277 
7278     // Get the symbol that defines the frame offset.
7279     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7280     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
7281     unsigned IdxVal =
7282         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7283     MCSymbol *FrameAllocSym =
7284         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7285             GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
7286 
7287     Value *FP = I.getArgOperand(1);
7288     SDValue FPVal = getValue(FP);
7289     EVT PtrVT = FPVal.getValueType();
7290 
7291     // Create a MCSymbol for the label to avoid any target lowering
7292     // that would make this PC relative.
7293     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
7294     SDValue OffsetVal =
7295         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
7296 
7297     // Add the offset to the FP.
7298     SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7299     setValue(&I, Add);
7300 
7301     return;
7302   }
7303 
7304   case Intrinsic::eh_exceptionpointer:
7305   case Intrinsic::eh_exceptioncode: {
7306     // Get the exception pointer vreg, copy from it, and resize it to fit.
7307     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
7308     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
7309     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
7310     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
7311     SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT);
7312     if (Intrinsic == Intrinsic::eh_exceptioncode)
7313       N = DAG.getZExtOrTrunc(N, sdl, MVT::i32);
7314     setValue(&I, N);
7315     return;
7316   }
7317   case Intrinsic::xray_customevent: {
7318     // Here we want to make sure that the intrinsic behaves as if it has a
7319     // specific calling convention.
7320     const auto &Triple = DAG.getTarget().getTargetTriple();
7321     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7322       return;
7323 
7324     SmallVector<SDValue, 8> Ops;
7325 
7326     // We want to say that we always want the arguments in registers.
7327     SDValue LogEntryVal = getValue(I.getArgOperand(0));
7328     SDValue StrSizeVal = getValue(I.getArgOperand(1));
7329     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7330     SDValue Chain = getRoot();
7331     Ops.push_back(LogEntryVal);
7332     Ops.push_back(StrSizeVal);
7333     Ops.push_back(Chain);
7334 
7335     // We need to enforce the calling convention for the callsite, so that
7336     // argument ordering is enforced correctly, and that register allocation can
7337     // see that some registers may be assumed clobbered and have to preserve
7338     // them across calls to the intrinsic.
7339     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7340                                            sdl, NodeTys, Ops);
7341     SDValue patchableNode = SDValue(MN, 0);
7342     DAG.setRoot(patchableNode);
7343     setValue(&I, patchableNode);
7344     return;
7345   }
7346   case Intrinsic::xray_typedevent: {
7347     // Here we want to make sure that the intrinsic behaves as if it has a
7348     // specific calling convention.
7349     const auto &Triple = DAG.getTarget().getTargetTriple();
7350     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7351       return;
7352 
7353     SmallVector<SDValue, 8> Ops;
7354 
7355     // We want to say that we always want the arguments in registers.
7356     // It's unclear to me how manipulating the selection DAG here forces callers
7357     // to provide arguments in registers instead of on the stack.
7358     SDValue LogTypeId = getValue(I.getArgOperand(0));
7359     SDValue LogEntryVal = getValue(I.getArgOperand(1));
7360     SDValue StrSizeVal = getValue(I.getArgOperand(2));
7361     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7362     SDValue Chain = getRoot();
7363     Ops.push_back(LogTypeId);
7364     Ops.push_back(LogEntryVal);
7365     Ops.push_back(StrSizeVal);
7366     Ops.push_back(Chain);
7367 
7368     // We need to enforce the calling convention for the callsite, so that
7369     // argument ordering is enforced correctly, and that register allocation can
7370     // see that some registers may be assumed clobbered and have to preserve
7371     // them across calls to the intrinsic.
7372     MachineSDNode *MN = DAG.getMachineNode(
7373         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7374     SDValue patchableNode = SDValue(MN, 0);
7375     DAG.setRoot(patchableNode);
7376     setValue(&I, patchableNode);
7377     return;
7378   }
7379   case Intrinsic::experimental_deoptimize:
7380     LowerDeoptimizeCall(&I);
7381     return;
7382   case Intrinsic::experimental_stepvector:
7383     visitStepVector(I);
7384     return;
7385   case Intrinsic::vector_reduce_fadd:
7386   case Intrinsic::vector_reduce_fmul:
7387   case Intrinsic::vector_reduce_add:
7388   case Intrinsic::vector_reduce_mul:
7389   case Intrinsic::vector_reduce_and:
7390   case Intrinsic::vector_reduce_or:
7391   case Intrinsic::vector_reduce_xor:
7392   case Intrinsic::vector_reduce_smax:
7393   case Intrinsic::vector_reduce_smin:
7394   case Intrinsic::vector_reduce_umax:
7395   case Intrinsic::vector_reduce_umin:
7396   case Intrinsic::vector_reduce_fmax:
7397   case Intrinsic::vector_reduce_fmin:
7398   case Intrinsic::vector_reduce_fmaximum:
7399   case Intrinsic::vector_reduce_fminimum:
7400     visitVectorReduce(I, Intrinsic);
7401     return;
7402 
7403   case Intrinsic::icall_branch_funnel: {
7404     SmallVector<SDValue, 16> Ops;
7405     Ops.push_back(getValue(I.getArgOperand(0)));
7406 
7407     int64_t Offset;
7408     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7409         I.getArgOperand(1), Offset, DAG.getDataLayout()));
7410     if (!Base)
7411       report_fatal_error(
7412           "llvm.icall.branch.funnel operand must be a GlobalValue");
7413     Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0));
7414 
7415     struct BranchFunnelTarget {
7416       int64_t Offset;
7417       SDValue Target;
7418     };
7419     SmallVector<BranchFunnelTarget, 8> Targets;
7420 
7421     for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) {
7422       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7423           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
7424       if (ElemBase != Base)
7425         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
7426                            "to the same GlobalValue");
7427 
7428       SDValue Val = getValue(I.getArgOperand(Op + 1));
7429       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7430       if (!GA)
7431         report_fatal_error(
7432             "llvm.icall.branch.funnel operand must be a GlobalValue");
7433       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
7434                                      GA->getGlobal(), sdl, Val.getValueType(),
7435                                      GA->getOffset())});
7436     }
7437     llvm::sort(Targets,
7438                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
7439                  return T1.Offset < T2.Offset;
7440                });
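         // Emitting the targets in ascending offset order allows the target to
         // lower the funnel as a compare-and-branch tree over the candidates.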
7441 
7442     for (auto &T : Targets) {
7443       Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32));
7444       Ops.push_back(T.Target);
7445     }
7446 
7447     Ops.push_back(DAG.getRoot()); // Chain
7448     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7449                                  MVT::Other, Ops),
7450               0);
7451     DAG.setRoot(N);
7452     setValue(&I, N);
7453     HasTailCall = true;
7454     return;
7455   }
7456 
7457   case Intrinsic::wasm_landingpad_index:
7458     // Information this intrinsic contained has been transferred to
7459     // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
7460     // delete it now.
7461     return;
7462 
7463   case Intrinsic::aarch64_settag:
7464   case Intrinsic::aarch64_settag_zero: {
7465     const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7466     bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
7467     SDValue Val = TSI.EmitTargetCodeForSetTag(
7468         DAG, sdl, getRoot(), getValue(I.getArgOperand(0)),
7469         getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
7470         ZeroMemory);
7471     DAG.setRoot(Val);
7472     setValue(&I, Val);
7473     return;
7474   }
7475   case Intrinsic::amdgcn_cs_chain: {
7476     assert(I.arg_size() == 5 && "Additional args not supported yet");
7477     assert(cast<ConstantInt>(I.getOperand(4))->isZero() &&
7478            "Non-zero flags not supported yet");
7479 
7480     // At this point we don't care if it's amdgpu_cs_chain or
7481     // amdgpu_cs_chain_preserve.
7482     CallingConv::ID CC = CallingConv::AMDGPU_CS_Chain;
7483 
7484     Type *RetTy = I.getType();
7485     assert(RetTy->isVoidTy() && "Should not return");
7486 
7487     SDValue Callee = getValue(I.getOperand(0));
7488 
7489     // We only have 2 actual args: one for the SGPRs and one for the VGPRs.
7490     // We'll also tack the value of the EXEC mask at the end.
7491     TargetLowering::ArgListTy Args;
7492     Args.reserve(3);
7493 
7494     for (unsigned Idx : {2, 3, 1}) {
7495       TargetLowering::ArgListEntry Arg;
7496       Arg.Node = getValue(I.getOperand(Idx));
7497       Arg.Ty = I.getOperand(Idx)->getType();
7498       Arg.setAttributes(&I, Idx);
7499       Args.push_back(Arg);
7500     }
7501 
7502     assert(Args[0].IsInReg && "SGPR args should be marked inreg");
7503     assert(!Args[1].IsInReg && "VGPR args should not be marked inreg");
7504     Args[2].IsInReg = true; // EXEC should be inreg
7505 
7506     TargetLowering::CallLoweringInfo CLI(DAG);
7507     CLI.setDebugLoc(getCurSDLoc())
7508         .setChain(getRoot())
7509         .setCallee(CC, RetTy, Callee, std::move(Args))
7510         .setNoReturn(true)
7511         .setTailCall(true)
7512         .setConvergent(I.isConvergent());
7513     CLI.CB = &I;
7514     std::pair<SDValue, SDValue> Result =
7515         lowerInvokable(CLI, /*EHPadBB*/ nullptr);
7516     (void)Result;
7517     assert(!Result.first.getNode() && !Result.second.getNode() &&
7518            "Should've lowered as tail call");
7519 
7520     HasTailCall = true;
7521     return;
7522   }
7523   case Intrinsic::ptrmask: {
7524     SDValue Ptr = getValue(I.getOperand(0));
7525     SDValue Mask = getValue(I.getOperand(1));
7526 
7527     EVT PtrVT = Ptr.getValueType();
7528     assert(PtrVT == Mask.getValueType() &&
7529            "Pointers with different index type are not supported by SDAG");
7530     setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr, Mask));
7531     return;
7532   }
7533   case Intrinsic::threadlocal_address: {
7534     setValue(&I, getValue(I.getOperand(0)));
7535     return;
7536   }
7537   case Intrinsic::get_active_lane_mask: {
7538     EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7539     SDValue Index = getValue(I.getOperand(0));
7540     EVT ElementVT = Index.getValueType();
7541 
7542     if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) {
7543       visitTargetIntrinsic(I, Intrinsic);
7544       return;
7545     }
7546 
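         // Expand to a vector compare: lane i is active iff
         //   (Index + i) <u TripCount.
         // The saturating add keeps lanes whose index computation would wrap
         // at UINT_MAX inactive instead of wrapping back below the trip count.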
7547     SDValue TripCount = getValue(I.getOperand(1));
7548     EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT,
7549                                  CCVT.getVectorElementCount());
7550 
7551     SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index);
7552     SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount);
7553     SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
7554     SDValue VectorInduction = DAG.getNode(
7555         ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep);
7556     SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction,
7557                                  VectorTripCount, ISD::CondCode::SETULT);
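    // Worked example (editor's note): with Index = 5, TripCount = 8 and a
    // 4-element mask, VectorStep = <0,1,2,3>, so VectorInduction =
    // uaddsat(<5,5,5,5>, <0,1,2,3>) = <5,6,7,8>, and the SETULT against
    // <8,8,8,8> yields <1,1,1,0>. The saturating add keeps lanes that would
    // otherwise wrap around from spuriously re-entering the mask.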
7558     setValue(&I, SetCC);
7559     return;
7560   }
7561   case Intrinsic::experimental_get_vector_length: {
7562     assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
7563            "Expected positive VF");
7564     unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
7565     bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
7566 
7567     SDValue Count = getValue(I.getOperand(0));
7568     EVT CountVT = Count.getValueType();
7569 
7570     if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) {
7571       visitTargetIntrinsic(I, Intrinsic);
7572       return;
7573     }
7574 
7575     // Expand to a umin between the trip count and the maximum number of
7576     // elements the type can hold.
7577     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7578 
7579     // Extend the trip count to at least the result VT.
7580     if (CountVT.bitsLT(VT)) {
7581       Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count);
7582       CountVT = VT;
7583     }
7584 
7585     SDValue MaxEVL = DAG.getElementCount(sdl, CountVT,
7586                                          ElementCount::get(VF, IsScalable));
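    // Editor's note: e.g. with VF = 4 and IsScalable = true, MaxEVL is
    // 4 * vscale, so a trip count of 10 on a machine with vscale = 2 yields
    // umin(10, 8) = 8 active elements.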
7587 
7588     SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL);
7589     // Clip to the result type if needed.
7590     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin);
7591 
7592     setValue(&I, Trunc);
7593     return;
7594   }
7595   case Intrinsic::experimental_cttz_elts: {
7596     auto DL = getCurSDLoc();
7597     SDValue Op = getValue(I.getOperand(0));
7598     EVT OpVT = Op.getValueType();
7599 
7600     if (!TLI.shouldExpandCttzElements(OpVT)) {
7601       visitTargetIntrinsic(I, Intrinsic);
7602       return;
7603     }
7604 
7605     if (OpVT.getScalarType() != MVT::i1) {
7606       // Compare the input vector elements to zero and use the result to count trailing zeros.
7607       SDValue AllZero = DAG.getConstant(0, DL, OpVT);
7608       OpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
7609                               OpVT.getVectorElementCount());
7610       Op = DAG.getSetCC(DL, OpVT, Op, AllZero, ISD::SETNE);
7611     }
7612 
7613     // Find the smallest "sensible" element type to use for the expansion.
7614     ConstantRange CR(
7615         APInt(64, OpVT.getVectorElementCount().getKnownMinValue()));
7616     if (OpVT.isScalableVT())
7617       CR = CR.umul_sat(getVScaleRange(I.getCaller(), 64));
7618 
7619     // If the zero-is-poison flag is set, we can assume the upper limit
7620     // of the result is VF-1.
7621     if (!cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero())
7622       CR = CR.subtract(APInt(64, 1));
7623 
7624     unsigned EltWidth = I.getType()->getScalarSizeInBits();
7625     EltWidth = std::min(EltWidth, (unsigned)CR.getActiveBits());
7626     EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);
7627 
7628     MVT NewEltTy = MVT::getIntegerVT(EltWidth);
7629 
7630     // Create the new vector type & get the vector length
7631     EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltTy,
7632                                  OpVT.getVectorElementCount());
7633 
7634     SDValue VL =
7635         DAG.getElementCount(DL, NewEltTy, OpVT.getVectorElementCount());
7636 
7637     SDValue StepVec = DAG.getStepVector(DL, NewVT);
7638     SDValue SplatVL = DAG.getSplat(NewVT, DL, VL);
7639     SDValue StepVL = DAG.getNode(ISD::SUB, DL, NewVT, SplatVL, StepVec);
7640     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, Op);
7641     SDValue And = DAG.getNode(ISD::AND, DL, NewVT, StepVL, Ext);
7642     SDValue Max = DAG.getNode(ISD::VECREDUCE_UMAX, DL, NewEltTy, And);
7643     SDValue Sub = DAG.getNode(ISD::SUB, DL, NewEltTy, VL, Max);
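    // Worked example (editor's note): for Op = <0, 0, 1, 0> with VL = 4,
    // StepVL = <4, 3, 2, 1>; masking by the sign-extended input gives
    // And = <0, 0, 2, 0>, so Max = 2 and Sub = 4 - 2 = 2, the index of the
    // first set element. With no set elements, Max = 0 and the result is VL.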
7644 
7645     EVT RetTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7646     SDValue Ret = DAG.getZExtOrTrunc(Sub, DL, RetTy);
7647 
7648     setValue(&I, Ret);
7649     return;
7650   }
7651   case Intrinsic::vector_insert: {
7652     SDValue Vec = getValue(I.getOperand(0));
7653     SDValue SubVec = getValue(I.getOperand(1));
7654     SDValue Index = getValue(I.getOperand(2));
7655 
7656     // The intrinsic's index type is i64, but the SDNode requires an index type
7657     // suitable for the target. Convert the index as required.
7658     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7659     if (Index.getValueType() != VectorIdxTy)
7660       Index = DAG.getVectorIdxConstant(
7661           cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
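    // For example (editor's sketch; the exact name mangling is illustrative):
    //   @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> %vec,
    //                                     <4 x i32> %sub, i64 0)
    // becomes an INSERT_SUBVECTOR node whose constant index has been
    // rewritten to the target's preferred vector-index type.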
7662 
7663     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7664     setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
7665                              Index));
7666     return;
7667   }
7668   case Intrinsic::vector_extract: {
7669     SDValue Vec = getValue(I.getOperand(0));
7670     SDValue Index = getValue(I.getOperand(1));
7671     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7672 
7673     // The intrinsic's index type is i64, but the SDNode requires an index type
7674     // suitable for the target. Convert the index as required.
7675     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7676     if (Index.getValueType() != VectorIdxTy)
7677       Index = DAG.getVectorIdxConstant(
7678           cast<ConstantSDNode>(Index)->getZExtValue(), sdl);
7679 
7680     setValue(&I,
7681              DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
7682     return;
7683   }
7684   case Intrinsic::experimental_vector_reverse:
7685     visitVectorReverse(I);
7686     return;
7687   case Intrinsic::experimental_vector_splice:
7688     visitVectorSplice(I);
7689     return;
7690   case Intrinsic::callbr_landingpad:
7691     visitCallBrLandingPad(I);
7692     return;
7693   case Intrinsic::experimental_vector_interleave2:
7694     visitVectorInterleave(I);
7695     return;
7696   case Intrinsic::experimental_vector_deinterleave2:
7697     visitVectorDeinterleave(I);
7698     return;
7699   }
7700 }
7701 
7702 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
7703     const ConstrainedFPIntrinsic &FPI) {
7704   SDLoc sdl = getCurSDLoc();
7705 
7706   // We do not need to serialize constrained FP intrinsics against
7707   // each other or against (nonvolatile) loads, so they can be
7708   // chained like loads.
7709   SDValue Chain = DAG.getRoot();
7710   SmallVector<SDValue, 4> Opers;
7711   Opers.push_back(Chain);
7712   if (FPI.isUnaryOp()) {
7713     Opers.push_back(getValue(FPI.getArgOperand(0)));
7714   } else if (FPI.isTernaryOp()) {
7715     Opers.push_back(getValue(FPI.getArgOperand(0)));
7716     Opers.push_back(getValue(FPI.getArgOperand(1)));
7717     Opers.push_back(getValue(FPI.getArgOperand(2)));
7718   } else {
7719     Opers.push_back(getValue(FPI.getArgOperand(0)));
7720     Opers.push_back(getValue(FPI.getArgOperand(1)));
7721   }
7722 
7723   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
7724     assert(Result.getNode()->getNumValues() == 2);
7725 
7726     // Push node to the appropriate list so that future instructions can be
7727     // chained up correctly.
7728     SDValue OutChain = Result.getValue(1);
7729     switch (EB) {
7730     case fp::ExceptionBehavior::ebIgnore:
7731       // The only reason why ebIgnore nodes still need to be chained is that
7732       // they might depend on the current rounding mode, and therefore must
7733       // not be moved across instructions that may change that mode.
7734       [[fallthrough]];
7735     case fp::ExceptionBehavior::ebMayTrap:
7736       // These must not be moved across calls or instructions that may change
7737       // floating-point exception masks.
7738       PendingConstrainedFP.push_back(OutChain);
7739       break;
7740     case fp::ExceptionBehavior::ebStrict:
7741       // These must not be moved across calls or instructions that may change
7742       // floating-point exception masks or read floating-point exception flags.
7743       // In addition, they cannot be optimized out even if unused.
7744       PendingConstrainedFPStrict.push_back(OutChain);
7745       break;
7746     }
7747   };
7748 
7749   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7750   EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
7751   SDVTList VTs = DAG.getVTList(VT, MVT::Other);
7752   fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
7753 
7754   SDNodeFlags Flags;
7755   if (EB == fp::ExceptionBehavior::ebIgnore)
7756     Flags.setNoFPExcept(true);
7757 
7758   if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
7759     Flags.copyFMF(*FPOp);
7760 
7761   unsigned Opcode;
7762   switch (FPI.getIntrinsicID()) {
7763   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7764 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
7765   case Intrinsic::INTRINSIC:                                                   \
7766     Opcode = ISD::STRICT_##DAGN;                                               \
7767     break;
7768 #include "llvm/IR/ConstrainedOps.def"
7769   case Intrinsic::experimental_constrained_fmuladd: {
7770     Opcode = ISD::STRICT_FMA;
7771     // Break fmuladd into fmul and fadd.
7772     if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
7773         !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
7774       Opers.pop_back();
7775       SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
7776       pushOutChain(Mul, EB);
7777       Opcode = ISD::STRICT_FADD;
7778       Opers.clear();
7779       Opers.push_back(Mul.getValue(1));
7780       Opers.push_back(Mul.getValue(0));
7781       Opers.push_back(getValue(FPI.getArgOperand(2)));
7782     }
7783     break;
7784   }
7785   }
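  // Editor's note: in the unfused path above,
  // experimental.constrained.fmuladd(a, b, c) becomes STRICT_FMUL(a, b)
  // followed by STRICT_FADD(mul, c), with the FADD taking the FMUL's output
  // chain as its input chain so the exception ordering is preserved.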
7786 
7787   // A few strict DAG nodes carry additional operands that are not
7788   // set up by the default code above.
7789   switch (Opcode) {
7790   default: break;
7791   case ISD::STRICT_FP_ROUND:
7792     Opers.push_back(
7793         DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
7794     break;
7795   case ISD::STRICT_FSETCC:
7796   case ISD::STRICT_FSETCCS: {
7797     auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
7798     ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
7799     if (TM.Options.NoNaNsFPMath)
7800       Condition = getFCmpCodeWithoutNaN(Condition);
7801     Opers.push_back(DAG.getCondCode(Condition));
7802     break;
7803   }
7804   }
7805 
7806   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
7807   pushOutChain(Result, EB);
7808 
7809   SDValue FPResult = Result.getValue(0);
7810   setValue(&FPI, FPResult);
7811 }
7812 
7813 static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
7814   std::optional<unsigned> ResOPC;
7815   switch (VPIntrin.getIntrinsicID()) {
7816   case Intrinsic::vp_ctlz: {
7817     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7818     ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
7819     break;
7820   }
7821   case Intrinsic::vp_cttz: {
7822     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7823     ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
7824     break;
7825   }
7826 #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)                                    \
7827   case Intrinsic::VPID:                                                        \
7828     ResOPC = ISD::VPSD;                                                        \
7829     break;
7830 #include "llvm/IR/VPIntrinsics.def"
7831   }
7832 
7833   if (!ResOPC)
7834     llvm_unreachable(
7835         "Inconsistency: no SDNode available for this VPIntrinsic!");
7836 
7837   if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
7838       *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
7839     if (VPIntrin.getFastMathFlags().allowReassoc())
7840       return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
7841                                                 : ISD::VP_REDUCE_FMUL;
7842   }
7843 
7844   return *ResOPC;
7845 }
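// Editor's note: for instance, llvm.vp.add maps to ISD::VP_ADD via the table
// in VPIntrinsics.def, while llvm.vp.reduce.fadd starts as the ordered
// VP_REDUCE_SEQ_FADD and is relaxed above to VP_REDUCE_FADD when the call
// carries the 'reassoc' fast-math flag.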
7846 
7847 void SelectionDAGBuilder::visitVPLoad(
7848     const VPIntrinsic &VPIntrin, EVT VT,
7849     const SmallVectorImpl<SDValue> &OpValues) {
7850   SDLoc DL = getCurSDLoc();
7851   Value *PtrOperand = VPIntrin.getArgOperand(0);
7852   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7853   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7854   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7855   SDValue LD;
7856   // Do not serialize variable-length loads of constant memory with
7857   // anything.
7858   if (!Alignment)
7859     Alignment = DAG.getEVTAlign(VT);
7860   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
7861   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
7862   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
7863   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7864       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
7865       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7866   LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
7867                      MMO, /*IsExpanding=*/false);
7868   if (AddToChain)
7869     PendingLoads.push_back(LD.getValue(1));
7870   setValue(&VPIntrin, LD);
7871 }
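// For example (editor's sketch; the mangled suffix is illustrative), a call
//   %v = call <4 x i32> @llvm.vp.load.v4i32.p0(ptr %p, <4 x i1> %m, i32 %evl)
// lowers to a VP_LOAD node whose output chain is only added to PendingLoads
// when the pointer may reference non-constant memory.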
7872 
7873 void SelectionDAGBuilder::visitVPGather(
7874     const VPIntrinsic &VPIntrin, EVT VT,
7875     const SmallVectorImpl<SDValue> &OpValues) {
7876   SDLoc DL = getCurSDLoc();
7877   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7878   Value *PtrOperand = VPIntrin.getArgOperand(0);
7879   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7880   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7881   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7882   SDValue LD;
7883   if (!Alignment)
7884     Alignment = DAG.getEVTAlign(VT.getScalarType());
7885   unsigned AS =
7886     PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7887   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7888      MachinePointerInfo(AS), MachineMemOperand::MOLoad,
7889      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7890   SDValue Base, Index, Scale;
7891   ISD::MemIndexType IndexType;
7892   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7893                                     this, VPIntrin.getParent(),
7894                                     VT.getScalarStoreSize());
7895   if (!UniformBase) {
7896     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7897     Index = getValue(PtrOperand);
7898     IndexType = ISD::SIGNED_SCALED;
7899     Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7900   }
7901   EVT IdxVT = Index.getValueType();
7902   EVT EltTy = IdxVT.getVectorElementType();
7903   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7904     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7905     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7906   }
7907   LD = DAG.getGatherVP(
7908       DAG.getVTList(VT, MVT::Other), VT, DL,
7909       {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
7910       IndexType);
7911   PendingLoads.push_back(LD.getValue(1));
7912   setValue(&VPIntrin, LD);
7913 }
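// Editor's note: when the pointer vector is a uniform-base GEP, e.g.
// getelementptr %base, <4 x i64> %offsets, getUniformBase splits it into a
// scalar Base and the Index vector %offsets; otherwise the whole pointer
// vector becomes the index with Base = 0 and Scale = 1.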
7914 
7915 void SelectionDAGBuilder::visitVPStore(
7916     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7917   SDLoc DL = getCurSDLoc();
7918   Value *PtrOperand = VPIntrin.getArgOperand(1);
7919   EVT VT = OpValues[0].getValueType();
7920   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7921   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7922   SDValue ST;
7923   if (!Alignment)
7924     Alignment = DAG.getEVTAlign(VT);
7925   SDValue Ptr = OpValues[1];
7926   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
7927   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7928       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
7929       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7930   ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
7931                       OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
7932                       /* IsTruncating */ false, /*IsCompressing*/ false);
7933   DAG.setRoot(ST);
7934   setValue(&VPIntrin, ST);
7935 }
7936 
7937 void SelectionDAGBuilder::visitVPScatter(
7938     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7939   SDLoc DL = getCurSDLoc();
7940   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7941   Value *PtrOperand = VPIntrin.getArgOperand(1);
7942   EVT VT = OpValues[0].getValueType();
7943   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7944   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7945   SDValue ST;
7946   if (!Alignment)
7947     Alignment = DAG.getEVTAlign(VT.getScalarType());
7948   unsigned AS =
7949       PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7950   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7951       MachinePointerInfo(AS), MachineMemOperand::MOStore,
7952       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7953   SDValue Base, Index, Scale;
7954   ISD::MemIndexType IndexType;
7955   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7956                                     this, VPIntrin.getParent(),
7957                                     VT.getScalarStoreSize());
7958   if (!UniformBase) {
7959     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7960     Index = getValue(PtrOperand);
7961     IndexType = ISD::SIGNED_SCALED;
7962     Scale =
7963       DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7964   }
7965   EVT IdxVT = Index.getValueType();
7966   EVT EltTy = IdxVT.getVectorElementType();
7967   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7968     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7969     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7970   }
7971   ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
7972                         {getMemoryRoot(), OpValues[0], Base, Index, Scale,
7973                          OpValues[2], OpValues[3]},
7974                         MMO, IndexType);
7975   DAG.setRoot(ST);
7976   setValue(&VPIntrin, ST);
7977 }
7978 
7979 void SelectionDAGBuilder::visitVPStridedLoad(
7980     const VPIntrinsic &VPIntrin, EVT VT,
7981     const SmallVectorImpl<SDValue> &OpValues) {
7982   SDLoc DL = getCurSDLoc();
7983   Value *PtrOperand = VPIntrin.getArgOperand(0);
7984   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7985   if (!Alignment)
7986     Alignment = DAG.getEVTAlign(VT.getScalarType());
7987   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7988   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7989   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
7990   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
7991   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
7992   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7993       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
7994       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7995 
7996   SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
7997                                     OpValues[2], OpValues[3], MMO,
7998                                     false /*IsExpanding*/);
7999 
8000   if (AddToChain)
8001     PendingLoads.push_back(LD.getValue(1));
8002   setValue(&VPIntrin, LD);
8003 }
8004 
8005 void SelectionDAGBuilder::visitVPStridedStore(
8006     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8007   SDLoc DL = getCurSDLoc();
8008   Value *PtrOperand = VPIntrin.getArgOperand(1);
8009   EVT VT = OpValues[0].getValueType();
8010   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8011   if (!Alignment)
8012     Alignment = DAG.getEVTAlign(VT.getScalarType());
8013   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8014   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8015       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
8016       MemoryLocation::UnknownSize, *Alignment, AAInfo);
8017 
8018   SDValue ST = DAG.getStridedStoreVP(
8019       getMemoryRoot(), DL, OpValues[0], OpValues[1],
8020       DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8021       OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false,
8022       /*IsCompressing*/ false);
8023 
8024   DAG.setRoot(ST);
8025   setValue(&VPIntrin, ST);
8026 }
8027 
8028 void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
8029   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8030   SDLoc DL = getCurSDLoc();
8031 
8032   ISD::CondCode Condition;
8033   CmpInst::Predicate CondCode = VPIntrin.getPredicate();
8034   bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
8035   if (IsFP) {
8036     // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan)
8037     // flags, but calls that don't return floating-point types can't be
8038     // FPMathOperators, like vp.fcmp. This affects constrained fcmp too.
8039     Condition = getFCmpCondCode(CondCode);
8040     if (TM.Options.NoNaNsFPMath)
8041       Condition = getFCmpCodeWithoutNaN(Condition);
8042   } else {
8043     Condition = getICmpCondCode(CondCode);
8044   }
8045 
8046   SDValue Op1 = getValue(VPIntrin.getOperand(0));
8047   SDValue Op2 = getValue(VPIntrin.getOperand(1));
8048   // #2 is the condition code
8049   SDValue MaskOp = getValue(VPIntrin.getOperand(3));
8050   SDValue EVL = getValue(VPIntrin.getOperand(4));
8051   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8052   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8053          "Unexpected target EVL type");
8054   EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
8055 
8056   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8057                                                         VPIntrin.getType());
8058   setValue(&VPIntrin,
8059            DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8060 }
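// For example (editor's note), llvm.vp.icmp with an 'eq' predicate lowers to
// a VP_SETCC node with condition SETEQ, after the EVL operand has been
// zero-extended to the target's EVL type.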
8061 
8062 void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8063     const VPIntrinsic &VPIntrin) {
8064   SDLoc DL = getCurSDLoc();
8065   unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
8066 
8067   auto IID = VPIntrin.getIntrinsicID();
8068 
8069   if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8070     return visitVPCmp(*CmpI);
8071 
8072   SmallVector<EVT, 4> ValueVTs;
8073   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8074   ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
8075   SDVTList VTs = DAG.getVTList(ValueVTs);
8076 
8077   auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);
8078 
8079   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8080   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8081          "Unexpected target EVL type");
8082 
8083   // Request operands.
8084   SmallVector<SDValue, 7> OpValues;
8085   for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) {
8086     auto Op = getValue(VPIntrin.getArgOperand(I));
8087     if (I == EVLParamPos)
8088       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
8089     OpValues.push_back(Op);
8090   }
8091 
8092   switch (Opcode) {
8093   default: {
8094     SDNodeFlags SDFlags;
8095     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8096       SDFlags.copyFMF(*FPMO);
8097     SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags);
8098     setValue(&VPIntrin, Result);
8099     break;
8100   }
8101   case ISD::VP_LOAD:
8102     visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8103     break;
8104   case ISD::VP_GATHER:
8105     visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8106     break;
8107   case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8108     visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8109     break;
8110   case ISD::VP_STORE:
8111     visitVPStore(VPIntrin, OpValues);
8112     break;
8113   case ISD::VP_SCATTER:
8114     visitVPScatter(VPIntrin, OpValues);
8115     break;
8116   case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8117     visitVPStridedStore(VPIntrin, OpValues);
8118     break;
8119   case ISD::VP_FMULADD: {
8120     assert(OpValues.size() == 5 && "Unexpected number of operands");
8121     SDNodeFlags SDFlags;
8122     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8123       SDFlags.copyFMF(*FPMO);
8124     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
8125         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) {
8126       setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags));
8127     } else {
8128       SDValue Mul = DAG.getNode(
8129           ISD::VP_FMUL, DL, VTs,
8130           {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8131       SDValue Add =
8132           DAG.getNode(ISD::VP_FADD, DL, VTs,
8133                       {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8134       setValue(&VPIntrin, Add);
8135     }
8136     break;
8137   }
8138   case ISD::VP_IS_FPCLASS: {
8139     const DataLayout DLayout = DAG.getDataLayout();
8140     EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
8141     auto Constant = cast<ConstantSDNode>(OpValues[1])->getZExtValue();
8142     SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
8143     SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
8144                             {OpValues[0], Check, OpValues[2], OpValues[3]});
8145     setValue(&VPIntrin, V);
8146     return;
8147   }
8148   case ISD::VP_INTTOPTR: {
8149     SDValue N = OpValues[0];
8150     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
8151     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
8152     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8153                                OpValues[2]);
8154     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8155                              OpValues[2]);
8156     setValue(&VPIntrin, N);
8157     break;
8158   }
8159   case ISD::VP_PTRTOINT: {
8160     SDValue N = OpValues[0];
8161     EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8162                                                           VPIntrin.getType());
8163     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
8164                                        VPIntrin.getOperand(0)->getType());
8165     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8166                                OpValues[2]);
8167     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8168                              OpValues[2]);
8169     setValue(&VPIntrin, N);
8170     break;
8171   }
8172   case ISD::VP_ABS:
8173   case ISD::VP_CTLZ:
8174   case ISD::VP_CTLZ_ZERO_UNDEF:
8175   case ISD::VP_CTTZ:
8176   case ISD::VP_CTTZ_ZERO_UNDEF: {
8177     SDValue Result =
8178         DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8179     setValue(&VPIntrin, Result);
8180     break;
8181   }
8182   }
8183 }
8184 
8185 SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
8186                                           const BasicBlock *EHPadBB,
8187                                           MCSymbol *&BeginLabel) {
8188   MachineFunction &MF = DAG.getMachineFunction();
8189   MachineModuleInfo &MMI = MF.getMMI();
8190 
8191   // Insert a label before the invoke call to mark the try range.  This can be
8192   // used to detect deletion of the invoke via the MachineModuleInfo.
8193   BeginLabel = MMI.getContext().createTempSymbol();
8194 
8195   // For SjLj, keep track of which landing pads go with which invokes
8196   // so as to maintain the ordering of pads in the LSDA.
8197   unsigned CallSiteIndex = MMI.getCurrentCallSite();
8198   if (CallSiteIndex) {
8199     MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
8200     LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
8201 
8202     // Now that the call site is handled, stop tracking it.
8203     MMI.setCurrentCallSite(0);
8204   }
8205 
8206   return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
8207 }
8208 
8209 SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
8210                                         const BasicBlock *EHPadBB,
8211                                         MCSymbol *BeginLabel) {
8212   assert(BeginLabel && "BeginLabel should've been set");
8213 
8214   MachineFunction &MF = DAG.getMachineFunction();
8215   MachineModuleInfo &MMI = MF.getMMI();
8216 
8217   // Insert a label at the end of the invoke call to mark the try range.  This
8218   // can be used to detect deletion of the invoke via the MachineModuleInfo.
8219   MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
8220   Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
8221 
8222   // Inform MachineModuleInfo of range.
8223   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
8224   // Some platforms (e.g. wasm) use funclet-style IR but do not actually use
8225   // outlined funclets or their LSDA info style.
8226   if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
8227     assert(II && "II should've been set");
8228     WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
8229     EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
8230   } else if (!isScopedEHPersonality(Pers)) {
8231     assert(EHPadBB);
8232     MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
8233   }
8234 
8235   return Chain;
8236 }
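// Editor's note: together, lowerStartEH and lowerEndEH bracket the call as
//   BeginLabel: <the call> EndLabel:
// and record the [BeginLabel, EndLabel) range (via addInvoke or
// addIPToStateRange) so the runtime can map a fault in that range to its
// landing pad.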
8237 
8238 std::pair<SDValue, SDValue>
8239 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
8240                                     const BasicBlock *EHPadBB) {
8241   MCSymbol *BeginLabel = nullptr;
8242 
8243   if (EHPadBB) {
8244     // Both PendingLoads and PendingExports must be flushed here;
8245     // this call might not return.
8246     (void)getRoot();
8247     DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
8248     CLI.setChain(getRoot());
8249   }
8250 
8251   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8252   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
8253 
8254   assert((CLI.IsTailCall || Result.second.getNode()) &&
8255          "Non-null chain expected with non-tail call!");
8256   assert((Result.second.getNode() || !Result.first.getNode()) &&
8257          "Null value expected with tail call!");
8258 
8259   if (!Result.second.getNode()) {
8260     // As a special case, a null chain means that a tail call has been emitted
8261     // and the DAG root is already updated.
8262     HasTailCall = true;
8263 
8264     // Since there's no actual continuation from this block, nothing can be
8265     // relying on us setting vregs for them.
8266     PendingExports.clear();
8267   } else {
8268     DAG.setRoot(Result.second);
8269   }
8270 
8271   if (EHPadBB) {
8272     DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
8273                            BeginLabel));
8274   }
8275 
8276   return Result;
8277 }
8278 
8279 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
8280                                       bool isTailCall,
8281                                       bool isMustTailCall,
8282                                       const BasicBlock *EHPadBB) {
8283   auto &DL = DAG.getDataLayout();
8284   FunctionType *FTy = CB.getFunctionType();
8285   Type *RetTy = CB.getType();
8286 
8287   TargetLowering::ArgListTy Args;
8288   Args.reserve(CB.arg_size());
8289 
8290   const Value *SwiftErrorVal = nullptr;
8291   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8292 
8293   if (isTailCall) {
8294     // Avoid emitting tail calls in functions with the disable-tail-calls
8295     // attribute.
8296     auto *Caller = CB.getParent()->getParent();
8297     if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
8298         "true" && !isMustTailCall)
8299       isTailCall = false;
8300 
8301     // We can't tail call inside a function with a swifterror argument. Lowering
8302     // does not support this yet. It would have to move into the swifterror
8303     // register before the call.
8304     if (TLI.supportSwiftError() &&
8305         Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8306       isTailCall = false;
8307   }
8308 
8309   for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
8310     TargetLowering::ArgListEntry Entry;
8311     const Value *V = *I;
8312 
8313     // Skip empty types
8314     if (V->getType()->isEmptyTy())
8315       continue;
8316 
8317     SDValue ArgNode = getValue(V);
8318     Entry.Node = ArgNode; Entry.Ty = V->getType();
8319 
8320     Entry.setAttributes(&CB, I - CB.arg_begin());
8321 
8322     // Use swifterror virtual register as input to the call.
8323     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
8324       SwiftErrorVal = V;
8325       // We find the virtual register for the actual swifterror argument.
8326       // Instead of using the Value, we use the virtual register instead.
8327       Entry.Node =
8328           DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
8329                           EVT(TLI.getPointerTy(DL)));
8330     }
8331 
8332     Args.push_back(Entry);
8333 
8334     // If we have an explicit sret argument that is an Instruction (i.e., it
8335     // might point to function-local memory), we can't meaningfully tail-call.
8336     if (Entry.IsSRet && isa<Instruction>(V))
8337       isTailCall = false;
8338   }
8339 
8340   // If call site has a cfguardtarget operand bundle, create and add an
8341   // additional ArgListEntry.
8342   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
8343     TargetLowering::ArgListEntry Entry;
8344     Value *V = Bundle->Inputs[0];
8345     SDValue ArgNode = getValue(V);
8346     Entry.Node = ArgNode;
8347     Entry.Ty = V->getType();
8348     Entry.IsCFGuardTarget = true;
8349     Args.push_back(Entry);
8350   }
8351 
8352   // Check if target-independent constraints permit a tail call here.
8353   // Target-dependent constraints are checked within TLI->LowerCallTo.
8354   if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
8355     isTailCall = false;
8356 
8357   // Disable tail calls if there is a swifterror argument. Targets have not
8358   // been updated to support tail calls.
8359   if (TLI.supportSwiftError() && SwiftErrorVal)
8360     isTailCall = false;
8361 
8362   ConstantInt *CFIType = nullptr;
8363   if (CB.isIndirectCall()) {
8364     if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) {
8365       if (!TLI.supportKCFIBundles())
8366         report_fatal_error(
8367             "Target doesn't support calls with kcfi operand bundles.");
8368       CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8369       assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
8370     }
8371   }
8372 
8373   TargetLowering::CallLoweringInfo CLI(DAG);
8374   CLI.setDebugLoc(getCurSDLoc())
8375       .setChain(getRoot())
8376       .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
8377       .setTailCall(isTailCall)
8378       .setConvergent(CB.isConvergent())
8379       .setIsPreallocated(
8380           CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
8381       .setCFIType(CFIType);
8382   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8383 
8384   if (Result.first.getNode()) {
8385     Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
8386     setValue(&CB, Result.first);
8387   }
8388 
8389   // The last element of CLI.InVals has the SDValue for swifterror return.
8390   // Here we copy it to a virtual register and update SwiftErrorMap for
8391   // book-keeping.
8392   if (SwiftErrorVal && TLI.supportSwiftError()) {
8393     // Get the last element of InVals.
8394     SDValue Src = CLI.InVals.back();
8395     Register VReg =
8396         SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
8397     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
8398     DAG.setRoot(CopyNode);
8399   }
8400 }
8401 
8402 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
8403                              SelectionDAGBuilder &Builder) {
8404   // Check to see if this load can be trivially constant folded, e.g. if the
8405   // input is from a string literal.
8406   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
8407     // Cast pointer to the type we really want to load.
8408     Type *LoadTy =
8409         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
8410     if (LoadVT.isVector())
8411       LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
8412 
8413     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
8414                                          PointerType::getUnqual(LoadTy));
8415 
8416     if (const Constant *LoadCst =
8417             ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
8418                                          LoadTy, Builder.DAG.getDataLayout()))
8419       return Builder.getValue(LoadCst);
8420   }
8421 
8422   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
8423   // still constant memory, the input chain can be the entry node.
8424   SDValue Root;
8425   bool ConstantMemory = false;
8426 
8427   // Do not serialize (non-volatile) loads of constant memory with anything.
8428   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
8429     Root = Builder.DAG.getEntryNode();
8430     ConstantMemory = true;
8431   } else {
8432     // Do not serialize non-volatile loads against each other.
8433     Root = Builder.DAG.getRoot();
8434   }
8435 
8436   SDValue Ptr = Builder.getValue(PtrVal);
8437   SDValue LoadVal =
8438       Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
8439                           MachinePointerInfo(PtrVal), Align(1));
8440 
8441   if (!ConstantMemory)
8442     Builder.PendingLoads.push_back(LoadVal.getValue(1));
8443   return LoadVal;
8444 }
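// For example (editor's note), in memcmp(%p, "ab", 2) the literal side folds
// to the i16 constant 0x6261 on a little-endian target, so only one load
// node is emitted; loads from unfoldable constant memory are still emitted
// but chained to the entry node.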
8445 
8446 /// Record the value for an instruction that produces an integer result,
8447 /// converting the type where necessary.
8448 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
8449                                                   SDValue Value,
8450                                                   bool IsSigned) {
8451   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8452                                                     I.getType(), true);
8453   Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT);
8454   setValue(&I, Value);
8455 }
8456 
8457 /// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
8458 /// true and lower it. Otherwise return false, and it will be lowered like a
8459 /// normal call.
8460 /// The caller already checked that \p I calls the appropriate LibFunc with a
8461 /// correct prototype.
8462 bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
8463   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
8464   const Value *Size = I.getArgOperand(2);
8465   const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size));
8466   if (CSize && CSize->getZExtValue() == 0) {
8467     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8468                                                           I.getType(), true);
8469     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
8470     return true;
8471   }
8472 
8473   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8474   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
8475       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
8476       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
8477   if (Res.first.getNode()) {
8478     processIntegerCallValue(I, Res.first, true);
8479     PendingLoads.push_back(Res.second);
8480     return true;
8481   }
8482 
8483   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
8484   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
8485   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
8486     return false;
8487 
8488   // If the target has a fast compare for the given size, it will return a
8489   // preferred load type for that size. Require that the load VT is legal and
8490   // that the target supports unaligned loads of that type. Otherwise, return
8491   // INVALID.
8492   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
8493     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8494     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
8495     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
8496       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
8497       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
8498       // TODO: Check alignment of src and dest ptrs.
8499       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
8500       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
8501       if (!TLI.isTypeLegal(LVT) ||
8502           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
8503           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
8504         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
8505     }
8506 
8507     return LVT;
8508   };
8509 
8510   // This turns into unaligned loads. We only do this if the target natively
8511   // supports the MVT we'll be loading or if it is small enough (<= 4) that
8512   // we'll only produce a small number of byte loads.
8513   MVT LoadVT;
8514   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
8515   switch (NumBitsToCompare) {
8516   default:
8517     return false;
8518   case 16:
8519     LoadVT = MVT::i16;
8520     break;
8521   case 32:
8522     LoadVT = MVT::i32;
8523     break;
8524   case 64:
8525   case 128:
8526   case 256:
8527     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
8528     break;
8529   }
8530 
8531   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
8532     return false;
8533 
8534   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
8535   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
8536 
8537   // Bitcast to a wide integer type if the loads are vectors.
8538   if (LoadVT.isVector()) {
8539     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
8540     LoadL = DAG.getBitcast(CmpVT, LoadL);
8541     LoadR = DAG.getBitcast(CmpVT, LoadR);
8542   }
8543 
8544   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
8545   processIntegerCallValue(I, Cmp, false);
8546   return true;
8547 }
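// Editor's note: e.g. memcmp(a, b, 4) whose result is only compared against
// zero becomes two i32 loads and one SETNE; 8-, 16- and 32-byte sizes are
// only expanded when hasFastEqualityCompare reports a legal type that
// tolerates misaligned loads in both address spaces.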
8548 
8549 /// See if we can lower a memchr call into an optimized form. If so, return
8550 /// true and lower it. Otherwise return false, and it will be lowered like a
8551 /// normal call.
8552 /// The caller already checked that \p I calls the appropriate LibFunc with a
8553 /// correct prototype.
8554 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
8555   const Value *Src = I.getArgOperand(0);
8556   const Value *Char = I.getArgOperand(1);
8557   const Value *Length = I.getArgOperand(2);
8558 
8559   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8560   std::pair<SDValue, SDValue> Res =
8561     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
8562                                 getValue(Src), getValue(Char), getValue(Length),
8563                                 MachinePointerInfo(Src));
8564   if (Res.first.getNode()) {
8565     setValue(&I, Res.first);
8566     PendingLoads.push_back(Res.second);
8567     return true;
8568   }
8569 
8570   return false;
8571 }
8572 
8573 /// See if we can lower a mempcpy call into an optimized form. If so, return
8574 /// true and lower it. Otherwise return false, and it will be lowered like a
8575 /// normal call.
8576 /// The caller already checked that \p I calls the appropriate LibFunc with a
8577 /// correct prototype.
8578 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
8579   SDValue Dst = getValue(I.getArgOperand(0));
8580   SDValue Src = getValue(I.getArgOperand(1));
8581   SDValue Size = getValue(I.getArgOperand(2));
8582 
8583   Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
8584   Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
8585   // DAG::getMemcpy needs Alignment to be defined.
8586   Align Alignment = std::min(DstAlign, SrcAlign);
8587 
8588   SDLoc sdl = getCurSDLoc();
8589 
8590   // In the mempcpy context we need to pass in a false value for isTailCall
8591   // because the return pointer needs to be adjusted by the size of
8592   // the copied memory.
8593   SDValue Root = getMemoryRoot();
8594   SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, false, false,
8595                              /*isTailCall=*/false,
8596                              MachinePointerInfo(I.getArgOperand(0)),
8597                              MachinePointerInfo(I.getArgOperand(1)),
8598                              I.getAAMetadata());
8599   assert(MC.getNode() != nullptr &&
8600          "** memcpy should not be lowered as TailCall in mempcpy context **");
8601   DAG.setRoot(MC);
8602 
8603   // Check if Size needs to be truncated or extended.
8604   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
8605 
8606   // Adjust return pointer to point just past the last dst byte.
8607   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
8608                                     Dst, Size);
8609   setValue(&I, DstPlusSize);
8610   return true;
8611 }
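// Editor's note: i.e. mempcpy(dst, src, n) is lowered as memcpy(dst, src, n)
// plus an ADD, so the call's value is dst + n rather than dst.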
8612 
8613 /// See if we can lower a strcpy call into an optimized form.  If so, return
8614 /// true and lower it, otherwise return false and it will be lowered like a
8615 /// normal call.
8616 /// The caller already checked that \p I calls the appropriate LibFunc with a
8617 /// correct prototype.
8618 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
8619   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8620 
8621   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8622   std::pair<SDValue, SDValue> Res =
8623     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
8624                                 getValue(Arg0), getValue(Arg1),
8625                                 MachinePointerInfo(Arg0),
8626                                 MachinePointerInfo(Arg1), isStpcpy);
8627   if (Res.first.getNode()) {
8628     setValue(&I, Res.first);
8629     DAG.setRoot(Res.second);
8630     return true;
8631   }
8632 
8633   return false;
8634 }
8635 
8636 /// See if we can lower a strcmp call into an optimized form.  If so, return
8637 /// true and lower it, otherwise return false and it will be lowered like a
8638 /// normal call.
8639 /// The caller already checked that \p I calls the appropriate LibFunc with a
8640 /// correct prototype.
8641 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
8642   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8643 
8644   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8645   std::pair<SDValue, SDValue> Res =
8646     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
8647                                 getValue(Arg0), getValue(Arg1),
8648                                 MachinePointerInfo(Arg0),
8649                                 MachinePointerInfo(Arg1));
8650   if (Res.first.getNode()) {
8651     processIntegerCallValue(I, Res.first, true);
8652     PendingLoads.push_back(Res.second);
8653     return true;
8654   }
8655 
8656   return false;
8657 }
8658 
8659 /// See if we can lower a strlen call into an optimized form.  If so, return
8660 /// true and lower it, otherwise return false and it will be lowered like a
8661 /// normal call.
8662 /// The caller already checked that \p I calls the appropriate LibFunc with a
8663 /// correct prototype.
8664 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
8665   const Value *Arg0 = I.getArgOperand(0);
8666 
8667   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8668   std::pair<SDValue, SDValue> Res =
8669     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
8670                                 getValue(Arg0), MachinePointerInfo(Arg0));
8671   if (Res.first.getNode()) {
8672     processIntegerCallValue(I, Res.first, false);
8673     PendingLoads.push_back(Res.second);
8674     return true;
8675   }
8676 
8677   return false;
8678 }
8679 
8680 /// See if we can lower a strnlen call into an optimized form.  If so, return
8681 /// true and lower it, otherwise return false and it will be lowered like a
8682 /// normal call.
8683 /// The caller already checked that \p I calls the appropriate LibFunc with a
8684 /// correct prototype.
8685 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
8686   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8687 
8688   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8689   std::pair<SDValue, SDValue> Res =
8690     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
8691                                  getValue(Arg0), getValue(Arg1),
8692                                  MachinePointerInfo(Arg0));
8693   if (Res.first.getNode()) {
8694     processIntegerCallValue(I, Res.first, false);
8695     PendingLoads.push_back(Res.second);
8696     return true;
8697   }
8698 
8699   return false;
8700 }
8701 
8702 /// See if we can lower a unary floating-point operation into an SDNode with
8703 /// the specified Opcode.  If so, return true and lower it, otherwise return
8704 /// false and it will be lowered like a normal call.
8705 /// The caller already checked that \p I calls the appropriate LibFunc with a
8706 /// correct prototype.
8707 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
8708                                               unsigned Opcode) {
8709   // We already checked this call's prototype; verify it doesn't modify errno.
8710   if (!I.onlyReadsMemory())
8711     return false;
8712 
8713   SDNodeFlags Flags;
8714   Flags.copyFMF(cast<FPMathOperator>(I));
8715 
8716   SDValue Tmp = getValue(I.getArgOperand(0));
8717   setValue(&I,
8718            DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
8719   return true;
8720 }
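// For example (editor's note), a sinf call that provably does not write
// errno (onlyReadsMemory) is lowered by visitCall below to an FSIN node
// carrying the call's fast-math flags, instead of a libcall.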
8721 
8722 /// See if we can lower a binary floating-point operation into an SDNode with
8723 /// the specified Opcode. If so, return true and lower it. Otherwise return
8724 /// false, and it will be lowered like a normal call.
8725 /// The caller already checked that \p I calls the appropriate LibFunc with a
8726 /// correct prototype.
8727 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
8728                                                unsigned Opcode) {
8729   // We already checked this call's prototype; verify it doesn't modify errno.
8730   if (!I.onlyReadsMemory())
8731     return false;
8732 
8733   SDNodeFlags Flags;
8734   Flags.copyFMF(cast<FPMathOperator>(I));
8735 
8736   SDValue Tmp0 = getValue(I.getArgOperand(0));
8737   SDValue Tmp1 = getValue(I.getArgOperand(1));
8738   EVT VT = Tmp0.getValueType();
8739   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
8740   return true;
8741 }
8742 
8743 void SelectionDAGBuilder::visitCall(const CallInst &I) {
8744   // Handle inline assembly differently.
8745   if (I.isInlineAsm()) {
8746     visitInlineAsm(I);
8747     return;
8748   }
8749 
8750   diagnoseDontCall(I);
8751 
8752   if (Function *F = I.getCalledFunction()) {
8753     if (F->isDeclaration()) {
8754       // Is this an LLVM intrinsic or a target-specific intrinsic?
8755       unsigned IID = F->getIntrinsicID();
8756       if (!IID)
8757         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
8758           IID = II->getIntrinsicID(F);
8759 
8760       if (IID) {
8761         visitIntrinsicCall(I, IID);
8762         return;
8763       }
8764     }
8765 
8766     // Check for well-known libc/libm calls.  If the function is internal, it
8767     // can't be a library call.  Don't do the check if marked as nobuiltin for
8768     // some reason or the call site requires strict floating point semantics.
8769     LibFunc Func;
8770     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
8771         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
8772         LibInfo->hasOptimizedCodeGen(Func)) {
8773       switch (Func) {
8774       default: break;
8775       case LibFunc_bcmp:
8776         if (visitMemCmpBCmpCall(I))
8777           return;
8778         break;
8779       case LibFunc_copysign:
8780       case LibFunc_copysignf:
8781       case LibFunc_copysignl:
8782         // We already checked this call's prototype; verify it doesn't modify
8783         // errno.
8784         if (I.onlyReadsMemory()) {
8785           SDValue LHS = getValue(I.getArgOperand(0));
8786           SDValue RHS = getValue(I.getArgOperand(1));
8787           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
8788                                    LHS.getValueType(), LHS, RHS));
8789           return;
8790         }
8791         break;
8792       case LibFunc_fabs:
8793       case LibFunc_fabsf:
8794       case LibFunc_fabsl:
8795         if (visitUnaryFloatCall(I, ISD::FABS))
8796           return;
8797         break;
8798       case LibFunc_fmin:
8799       case LibFunc_fminf:
8800       case LibFunc_fminl:
8801         if (visitBinaryFloatCall(I, ISD::FMINNUM))
8802           return;
8803         break;
8804       case LibFunc_fmax:
8805       case LibFunc_fmaxf:
8806       case LibFunc_fmaxl:
8807         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
8808           return;
8809         break;
8810       case LibFunc_sin:
8811       case LibFunc_sinf:
8812       case LibFunc_sinl:
8813         if (visitUnaryFloatCall(I, ISD::FSIN))
8814           return;
8815         break;
8816       case LibFunc_cos:
8817       case LibFunc_cosf:
8818       case LibFunc_cosl:
8819         if (visitUnaryFloatCall(I, ISD::FCOS))
8820           return;
8821         break;
8822       case LibFunc_sqrt:
8823       case LibFunc_sqrtf:
8824       case LibFunc_sqrtl:
8825       case LibFunc_sqrt_finite:
8826       case LibFunc_sqrtf_finite:
8827       case LibFunc_sqrtl_finite:
8828         if (visitUnaryFloatCall(I, ISD::FSQRT))
8829           return;
8830         break;
8831       case LibFunc_floor:
8832       case LibFunc_floorf:
8833       case LibFunc_floorl:
8834         if (visitUnaryFloatCall(I, ISD::FFLOOR))
8835           return;
8836         break;
8837       case LibFunc_nearbyint:
8838       case LibFunc_nearbyintf:
8839       case LibFunc_nearbyintl:
8840         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
8841           return;
8842         break;
8843       case LibFunc_ceil:
8844       case LibFunc_ceilf:
8845       case LibFunc_ceill:
8846         if (visitUnaryFloatCall(I, ISD::FCEIL))
8847           return;
8848         break;
8849       case LibFunc_rint:
8850       case LibFunc_rintf:
8851       case LibFunc_rintl:
8852         if (visitUnaryFloatCall(I, ISD::FRINT))
8853           return;
8854         break;
8855       case LibFunc_round:
8856       case LibFunc_roundf:
8857       case LibFunc_roundl:
8858         if (visitUnaryFloatCall(I, ISD::FROUND))
8859           return;
8860         break;
8861       case LibFunc_trunc:
8862       case LibFunc_truncf:
8863       case LibFunc_truncl:
8864         if (visitUnaryFloatCall(I, ISD::FTRUNC))
8865           return;
8866         break;
8867       case LibFunc_log2:
8868       case LibFunc_log2f:
8869       case LibFunc_log2l:
8870         if (visitUnaryFloatCall(I, ISD::FLOG2))
8871           return;
8872         break;
8873       case LibFunc_exp2:
8874       case LibFunc_exp2f:
8875       case LibFunc_exp2l:
8876         if (visitUnaryFloatCall(I, ISD::FEXP2))
8877           return;
8878         break;
8879       case LibFunc_exp10:
8880       case LibFunc_exp10f:
8881       case LibFunc_exp10l:
8882         if (visitUnaryFloatCall(I, ISD::FEXP10))
8883           return;
8884         break;
8885       case LibFunc_ldexp:
8886       case LibFunc_ldexpf:
8887       case LibFunc_ldexpl:
8888         if (visitBinaryFloatCall(I, ISD::FLDEXP))
8889           return;
8890         break;
8891       case LibFunc_memcmp:
8892         if (visitMemCmpBCmpCall(I))
8893           return;
8894         break;
8895       case LibFunc_mempcpy:
8896         if (visitMemPCpyCall(I))
8897           return;
8898         break;
8899       case LibFunc_memchr:
8900         if (visitMemChrCall(I))
8901           return;
8902         break;
8903       case LibFunc_strcpy:
8904         if (visitStrCpyCall(I, false))
8905           return;
8906         break;
8907       case LibFunc_stpcpy:
8908         if (visitStrCpyCall(I, true))
8909           return;
8910         break;
8911       case LibFunc_strcmp:
8912         if (visitStrCmpCall(I))
8913           return;
8914         break;
8915       case LibFunc_strlen:
8916         if (visitStrLenCall(I))
8917           return;
8918         break;
8919       case LibFunc_strnlen:
8920         if (visitStrNLenCall(I))
8921           return;
8922         break;
8923       }
8924     }
8925   }
8926 
8927   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
8928   // have to do anything here to lower funclet bundles.
8929   // CFGuardTarget bundles are lowered in LowerCallTo.
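  // (Illustrative) For example, a call written with a deopt bundle, e.g.
  //   call void @f() [ "deopt"(i32 13) ]
  // is routed to LowerCallSiteWithDeoptBundle below rather than LowerCallTo.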
8930   assert(!I.hasOperandBundlesOtherThan(
8931              {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
8932               LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
8933               LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi}) &&
8934          "Cannot lower calls with arbitrary operand bundles!");
8935 
8936   SDValue Callee = getValue(I.getCalledOperand());
8937 
8938   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
8939     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
8940   else
8941     // Check if we can potentially perform a tail call. More detailed checking
8942     // is done within LowerCallTo, after more information about the call is
8943     // known.
8944     LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
8945 }
8946 
8947 namespace {
8948 
8949 /// AsmOperandInfo - This contains information for each constraint that we are
8950 /// lowering.
8951 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
8952 public:
8953   /// CallOperand - If this is the result output operand or a clobber
8954   /// this is null, otherwise it is the incoming operand to the CallInst.
8955   /// This gets modified as the asm is processed.
8956   SDValue CallOperand;
8957 
8958   /// AssignedRegs - If this is a register or register class operand, this
8959   /// contains the set of register corresponding to the operand.
8960   RegsForValue AssignedRegs;
8961 
8962   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
8963     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
8964   }
8965 
8966   /// Whether or not this operand accesses memory
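  /// (for example, an "m"-constrained operand, or any indirect operand).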
8967   bool hasMemory(const TargetLowering &TLI) const {
8968     // Indirect operands access memory.
8969     if (isIndirect)
8970       return true;
8971 
8972     for (const auto &Code : Codes)
8973       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
8974         return true;
8975 
8976     return false;
8977   }
8978 };
8979 
8980 
8981 } // end anonymous namespace
8982 
8983 /// Make sure that the output operand \p OpInfo and its corresponding input
8984 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
8985 /// out).
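///
/// (Illustrative) An input constraint "0" ties its operand to output 0; when
/// the two ConstraintVTs differ but the register classes agree, the input
/// simply adopts the output's ConstraintVT below.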
8986 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
8987                                SDISelAsmOperandInfo &MatchingOpInfo,
8988                                SelectionDAG &DAG) {
8989   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
8990     return;
8991 
8992   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
8993   const auto &TLI = DAG.getTargetLoweringInfo();
8994 
8995   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
8996       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
8997                                        OpInfo.ConstraintVT);
8998   std::pair<unsigned, const TargetRegisterClass *> InputRC =
8999       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
9000                                        MatchingOpInfo.ConstraintVT);
9001   if ((OpInfo.ConstraintVT.isInteger() !=
9002        MatchingOpInfo.ConstraintVT.isInteger()) ||
9003       (MatchRC.second != InputRC.second)) {
9004     // FIXME: error out in a more elegant fashion
9005     report_fatal_error("Unsupported asm: input constraint"
9006                        " with a matching output constraint of"
9007                        " incompatible type!");
9008   }
9009   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9010 }
9011 
9012 /// Get a direct memory input to behave well as an indirect operand.
9013 /// This may introduce stores, hence the need for a \p Chain.
9014 /// \return The (possibly updated) chain.
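///
/// (Illustrative) A direct "m" input fed a plain SSA value has no address of
/// its own, so we store the value to a fresh stack slot and hand the asm the
/// slot's address; a float/int/vector constant becomes a constant-pool
/// address instead.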
9015 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
9016                                         SDISelAsmOperandInfo &OpInfo,
9017                                         SelectionDAG &DAG) {
9018   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9019 
9020   // If we don't have an indirect input, put it in the constant pool if we can,
9021   // otherwise spill it to a stack slot.
9022   // TODO: This isn't quite right. We need to handle these according to
9023   // the addressing mode that the constraint wants. Also, this may take
9024   // an additional register for the computation and we don't want that
9025   // either.
9026 
9027   // If the operand is a float, integer, or vector constant, spill to a
9028   // constant pool entry to get its address.
9029   const Value *OpVal = OpInfo.CallOperandVal;
9030   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9031       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9032     OpInfo.CallOperand = DAG.getConstantPool(
9033         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
9034     return Chain;
9035   }
9036 
9037   // Otherwise, create a stack slot and emit a store to it before the asm.
9038   Type *Ty = OpVal->getType();
9039   auto &DL = DAG.getDataLayout();
9040   uint64_t TySize = DL.getTypeAllocSize(Ty);
9041   MachineFunction &MF = DAG.getMachineFunction();
9042   int SSFI = MF.getFrameInfo().CreateStackObject(
9043       TySize, DL.getPrefTypeAlign(Ty), false);
9044   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
9045   Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9046                             MachinePointerInfo::getFixedStack(MF, SSFI),
9047                             TLI.getMemValueType(DL, Ty));
9048   OpInfo.CallOperand = StackSlot;
9049 
9050   return Chain;
9051 }
9052 
9053 /// GetRegistersForValue - Assign registers (virtual or physical) for the
9054 /// specified operand.  We prefer to assign virtual registers, to allow the
9055 /// register allocator to handle the assignment process.  However, if the asm
9056 /// uses features that we can't model on machineinstrs, we have SDISel do the
9057 /// allocation.  This produces generally horrible, but correct, code.
9058 ///
9059 ///   OpInfo describes the operand
9060 ///   RefOpInfo describes the matching operand if any, the operand otherwise
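///
/// (Illustrative) A constraint like "={ax}" resolves to the single physical
/// register AX, while "=r" gets fresh virtual registers from the appropriate
/// register class.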
9061 static std::optional<unsigned>
9062 getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
9063                      SDISelAsmOperandInfo &OpInfo,
9064                      SDISelAsmOperandInfo &RefOpInfo) {
9065   LLVMContext &Context = *DAG.getContext();
9066   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9067 
9068   MachineFunction &MF = DAG.getMachineFunction();
9069   SmallVector<unsigned, 4> Regs;
9070   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9071 
9072   // No work to do for memory/address operands.
9073   if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9074       OpInfo.ConstraintType == TargetLowering::C_Address)
9075     return std::nullopt;
9076 
9077   // If this is a constraint for a single physreg, or a constraint for a
9078   // register class, find it.
9079   unsigned AssignedReg;
9080   const TargetRegisterClass *RC;
9081   std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
9082       &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9083   // RC is unset only on failure. Return immediately.
9084   if (!RC)
9085     return std::nullopt;
9086 
9087   // Get the actual register value type.  This is important, because the user
9088   // may have asked for (e.g.) the AX register in i32 type.  We need to
9089   // remember that AX is actually i16 to get the right extension.
9090   const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
9091 
9092   if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9093     // If this is an FP operand in an integer register (or vice versa), or more
9094     // generally if the operand value disagrees with the register class we plan
9095     // to stick it in, fix the operand type.
9096     //
9097     // If this is an input value, the bitcast to the new type is done now.
9098     // Bitcast for output value is done at the end of visitInlineAsm().
9099     if ((OpInfo.Type == InlineAsm::isOutput ||
9100          OpInfo.Type == InlineAsm::isInput) &&
9101         !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9102       // Try to convert to the first EVT that the reg class contains.  If the
9103       // types are identical size, use a bitcast to convert (e.g. two differing
9104       // vector types).  Note: output bitcast is done at the end of
9105       // visitInlineAsm().
9106       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9107         // Exclude indirect inputs while they are unsupported because the code
9108         // to perform the load is missing and thus OpInfo.CallOperand still
9109         // refers to the input address rather than the pointed-to value.
9110         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
9111           OpInfo.CallOperand =
9112               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
9113         OpInfo.ConstraintVT = RegVT;
9114         // If the operand is an FP value and we want it in integer registers,
9115         // use the corresponding integer type. This turns an f64 value into
9116         // i64, which can be passed with two i32 values on a 32-bit machine.
9117       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9118         MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
9119         if (OpInfo.Type == InlineAsm::isInput)
9120           OpInfo.CallOperand =
9121               DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
9122         OpInfo.ConstraintVT = VT;
9123       }
9124     }
9125   }
9126 
9127   // No need to allocate a matching input constraint since the constraint it's
9128   // matching to has already been allocated.
9129   if (OpInfo.isMatchingInputConstraint())
9130     return std::nullopt;
9131 
9132   EVT ValueVT = OpInfo.ConstraintVT;
9133   if (OpInfo.ConstraintVT == MVT::Other)
9134     ValueVT = RegVT;
9135 
9136   // Initialize NumRegs.
9137   unsigned NumRegs = 1;
9138   if (OpInfo.ConstraintVT != MVT::Other)
9139     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
9140 
9141   // If this is a constraint for a specific physical register, like {r17},
9142   // assign it now.
9143 
9144   // If this is associated with a specific register, initialize the iterator
9145   // to the correct place. If virtual, make sure we have enough registers.
9146 
9147   // Initialize iterator if necessary
9148   TargetRegisterClass::iterator I = RC->begin();
9149   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9150 
9151   // Do not check for single registers.
9152   if (AssignedReg) {
9153     I = std::find(I, RC->end(), AssignedReg);
9154     if (I == RC->end()) {
9155       // RC does not contain the selected register, which indicates a
9156       // mismatch between the register and the required type/bitwidth.
9157       return {AssignedReg};
9158     }
9159   }
9160 
9161   for (; NumRegs; --NumRegs, ++I) {
9162     assert(I != RC->end() && "Ran out of registers to allocate!");
9163     Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
9164     Regs.push_back(R);
9165   }
9166 
9167   OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
9168   return std::nullopt;
9169 }
9170 
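// (Illustrative) INLINEASM node operands are laid out as: chain, asm string,
// !srcloc MDNode, extra-flags constant, then one group per asm operand of
// [flag, operands...]; each flag encodes how many operands follow it, which
// is what lets us skip whole groups here.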
9171 static unsigned
9172 findMatchingInlineAsmOperand(unsigned OperandNo,
9173                              const std::vector<SDValue> &AsmNodeOperands) {
9174   // Scan until we find the definition we already emitted of this operand.
9175   unsigned CurOp = InlineAsm::Op_FirstOperand;
9176   for (; OperandNo; --OperandNo) {
9177     // Advance to the next operand.
9178     unsigned OpFlag =
9179         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
9180     const InlineAsm::Flag F(OpFlag);
9181     assert(
9182         (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
9183         "Skipped past definitions?");
9184     CurOp += F.getNumOperandRegisters() + 1;
9185   }
9186   return CurOp;
9187 }
9188 
9189 namespace {
9190 
9191 class ExtraFlags {
9192   unsigned Flags = 0;
9193 
9194 public:
9195   explicit ExtraFlags(const CallBase &Call) {
9196     const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9197     if (IA->hasSideEffects())
9198       Flags |= InlineAsm::Extra_HasSideEffects;
9199     if (IA->isAlignStack())
9200       Flags |= InlineAsm::Extra_IsAlignStack;
9201     if (Call.isConvergent())
9202       Flags |= InlineAsm::Extra_IsConvergent;
9203     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
9204   }
9205 
9206   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
9207     // Ideally, we would only check against memory constraints.  However, the
9208     // meaning of an Other constraint can be target-specific and we can't easily
9209     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
9210     // for Other constraints as well.
9211     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9212         OpInfo.ConstraintType == TargetLowering::C_Other) {
9213       if (OpInfo.Type == InlineAsm::isInput)
9214         Flags |= InlineAsm::Extra_MayLoad;
9215       else if (OpInfo.Type == InlineAsm::isOutput)
9216         Flags |= InlineAsm::Extra_MayStore;
9217       else if (OpInfo.Type == InlineAsm::isClobber)
9218         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
9219     }
9220   }
9221 
9222   unsigned get() const { return Flags; }
9223 };
9224 
9225 } // end anonymous namespace
9226 
9227 static bool isFunction(SDValue Op) {
9228   if (Op && Op.getOpcode() == ISD::GlobalAddress) {
9229     if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9230       auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9231 
9232       // A normal "call dllimport func" instruction (non-inline-asm) forces
9233       // indirect access through its call opcode, and the asm printer usually
9234       // prints the indirect symbol marker (e.g. "*") based on that opcode.
9235       // Inline asm cannot do that today (this is similar to the "Data Access"
9236       // case), so we ignore dllimport functions here.
9237       if (Fn && !Fn->hasDLLImportStorageClass())
9238         return true;
9239     }
9240   }
9241   return false;
9242 }
9243 
9244 /// visitInlineAsm - Handle a call to an InlineAsm object.
9245 void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
9246                                          const BasicBlock *EHPadBB) {
9247   const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9248 
9249   /// ConstraintOperands - Information about all of the constraints.
9250   SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
9251 
9252   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9253   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
9254       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
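
  // (Illustrative) For an asm call such as
  //   %r = call i32 asm "add $1, $0", "=r,r,~{flags}"(i32 %x)
  // ParseConstraints produces three AsmOperandInfos: the "=r" output, the "r"
  // input, and the "~{flags}" clobber, which the passes below walk in order.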
9255 
9256   // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
9257   // AsmDialect, MayLoad, MayStore).
9258   bool HasSideEffect = IA->hasSideEffects();
9259   ExtraFlags ExtraInfo(Call);
9260 
9261   for (auto &T : TargetConstraints) {
9262     ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
9263     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
9264 
9265     if (OpInfo.CallOperandVal)
9266       OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
9267 
9268     if (!HasSideEffect)
9269       HasSideEffect = OpInfo.hasMemory(TLI);
9270 
9271     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
9272     // FIXME: Could we compute this on OpInfo rather than T?
9273 
9274     // Compute the constraint code and ConstraintType to use.
9275     TLI.ComputeConstraintToUse(T, SDValue());
9276 
9277     if (T.ConstraintType == TargetLowering::C_Immediate &&
9278         OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
9279       // We've delayed emitting a diagnostic for constraints like "n" because
9280       // inlining could cause an integer constant to show up.
9281       return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
9282                                           "' expects an integer constant "
9283                                           "expression");
9284 
9285     ExtraInfo.update(T);
9286   }
9287 
9288   // We won't need to flush pending loads if this asm doesn't touch
9289   // memory and is nonvolatile.
9290   SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
9291 
9292   bool EmitEHLabels = isa<InvokeInst>(Call);
9293   if (EmitEHLabels) {
9294     assert(EHPadBB && "InvokeInst must have an EHPadBB");
9295   }
9296   bool IsCallBr = isa<CallBrInst>(Call);
9297 
9298   if (IsCallBr || EmitEHLabels) {
9299     // If this is a callbr or invoke we need to flush pending exports since
9300     // inlineasm_br and invoke are terminators.
9301     // We need to do this before nodes are glued to the inlineasm_br node.
9302     Chain = getControlRoot();
9303   }
9304 
9305   MCSymbol *BeginLabel = nullptr;
9306   if (EmitEHLabels) {
9307     Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
9308   }
9309 
9310   int OpNo = -1;
9311   SmallVector<StringRef> AsmStrs;
9312   IA->collectAsmStrs(AsmStrs);
9313 
9314   // Second pass over the constraints: compute which constraint option to use.
9315   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9316     if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
9317       OpNo++;
9318 
9319     // If this is an output operand with a matching input operand, look up the
9320     // matching input. If their types mismatch, e.g. one is an integer, the
9321     // other is floating point, or their sizes are different, flag it as an
9322     // error.
9323     if (OpInfo.hasMatchingInput()) {
9324       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
9325       patchMatchingInput(OpInfo, Input, DAG);
9326     }
9327 
9328     // Compute the constraint code and ConstraintType to use.
9329     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
9330 
9331     if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
9332          OpInfo.Type == InlineAsm::isClobber) ||
9333         OpInfo.ConstraintType == TargetLowering::C_Address)
9334       continue;
9335 
9336     // In the Linux PIC model, there are 4 cases of value/label addressing:
9337     //
9338     // 1: Function call or Label jmp inside the module.
9339     // 2: Data access (such as global variable, static variable) inside module.
9340     // 3: Function call or Label jmp outside the module.
9341     // 4: Data access (such as global variable) outside the module.
9342     //
9343     // Because the current LLVM inline asm architecture is designed not to
9344     // "recognize" the asm code, it is hard to treat memory addressing
9345     // differently for the same value/address used in different instructions.
9346     // For example, in the PIC model, a call may go through the PLT or be
9347     // directly PC-relative, but lea/mov of a function address may use the GOT.
9348     //
9349     // Here we try to "recognize" function calls for cases 1 and 3 in inline
9350     // asm, and adjust the constraint for them.
9351     //
9352     // TODO: Since current inline asm discourages jumping to labels outside
9353     // the module, we don't handle jumps to function labels now, but we should
9354     // enhance this (especially in the PIC model) if a meaningful need arises.
9355     if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
9356         TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
9357         TM.getCodeModel() != CodeModel::Large) {
9358       OpInfo.isIndirect = false;
9359       OpInfo.ConstraintType = TargetLowering::C_Address;
9360     }
9361 
9362     // If this is a memory input, and if the operand is not indirect, do what we
9363     // need to provide an address for the memory input.
9364     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
9365         !OpInfo.isIndirect) {
9366       assert((OpInfo.isMultipleAlternative ||
9367               (OpInfo.Type == InlineAsm::isInput)) &&
9368              "Can only indirectify direct input operands!");
9369 
9370       // Memory operands really want the address of the value.
9371       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
9372 
9373       // There is no longer a Value* corresponding to this operand.
9374       OpInfo.CallOperandVal = nullptr;
9375 
9376       // It is now an indirect operand.
9377       OpInfo.isIndirect = true;
9378     }
9379 
9380   }
9381 
9382   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
9383   std::vector<SDValue> AsmNodeOperands;
9384   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
9385   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
9386       IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
9387 
9388   // If we have a !srcloc metadata node associated with it, we want to attach
9389   // this to the ultimately generated inline asm machineinstr.  To do this, we
9390   // pass this (potentially null) inline asm MDNode in as the third operand.
9391   const MDNode *SrcLoc = Call.getMetadata("srcloc");
9392   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
9393 
9394   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
9395   // bits as operand 3.
9396   AsmNodeOperands.push_back(DAG.getTargetConstant(
9397       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9398 
9399   // Third pass: Loop over operands to prepare DAG-level operands. As part of
9400   // this, assign virtual and physical registers for inputs and outputs.
9401   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9402     // Assign Registers.
9403     SDISelAsmOperandInfo &RefOpInfo =
9404         OpInfo.isMatchingInputConstraint()
9405             ? ConstraintOperands[OpInfo.getMatchedOperand()]
9406             : OpInfo;
9407     const auto RegError =
9408         getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
9409     if (RegError) {
9410       const MachineFunction &MF = DAG.getMachineFunction();
9411       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9412       const char *RegName = TRI.getName(*RegError);
9413       emitInlineAsmError(Call, "register '" + Twine(RegName) +
9414                                    "' allocated for constraint '" +
9415                                    Twine(OpInfo.ConstraintCode) +
9416                                    "' does not match required type");
9417       return;
9418     }
9419 
9420     auto DetectWriteToReservedRegister = [&]() {
9421       const MachineFunction &MF = DAG.getMachineFunction();
9422       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9423       for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
9424         if (Register::isPhysicalRegister(Reg) &&
9425             TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
9426           const char *RegName = TRI.getName(Reg);
9427           emitInlineAsmError(Call, "write to reserved register '" +
9428                                        Twine(RegName) + "'");
9429           return true;
9430         }
9431       }
9432       return false;
9433     };
9434     assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
9435             (OpInfo.Type == InlineAsm::isInput &&
9436              !OpInfo.isMatchingInputConstraint())) &&
9437            "Only address as input operand is allowed.");
9438 
9439     switch (OpInfo.Type) {
9440     case InlineAsm::isOutput:
9441       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9442         const InlineAsm::ConstraintCode ConstraintID =
9443             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9444         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9445                "Failed to convert memory constraint code to constraint id.");
9446 
9447         // Add information to the INLINEASM node to know about this output.
9448         InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
9449         OpFlags.setMemConstraint(ConstraintID);
9450         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
9451                                                         MVT::i32));
9452         AsmNodeOperands.push_back(OpInfo.CallOperand);
9453       } else {
9454         // Otherwise, this outputs to a register (directly for C_Register /
9455         // C_RegisterClass, and in a target-defined fashion for
9456         // C_Immediate/C_Other). Find a register that we can use.
9457         if (OpInfo.AssignedRegs.Regs.empty()) {
9458           emitInlineAsmError(
9459               Call, "couldn't allocate output register for constraint '" +
9460                         Twine(OpInfo.ConstraintCode) + "'");
9461           return;
9462         }
9463 
9464         if (DetectWriteToReservedRegister())
9465           return;
9466 
9467         // Add information to the INLINEASM node to know that this register is
9468         // set.
9469         OpInfo.AssignedRegs.AddInlineAsmOperands(
9470             OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
9471                                   : InlineAsm::Kind::RegDef,
9472             false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
9473       }
9474       break;
9475 
9476     case InlineAsm::isInput:
9477     case InlineAsm::isLabel: {
9478       SDValue InOperandVal = OpInfo.CallOperand;
9479 
9480       if (OpInfo.isMatchingInputConstraint()) {
9481         // If this is required to match an output register we have already set,
9482         // just use its register.
9483         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
9484                                                   AsmNodeOperands);
9485         InlineAsm::Flag Flag(
9486             cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue());
9487         if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
9488           if (OpInfo.isIndirect) {
9489             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
9490             emitInlineAsmError(Call, "inline asm not supported yet: "
9491                                      "don't know how to handle tied "
9492                                      "indirect register inputs");
9493             return;
9494           }
9495 
9496           SmallVector<unsigned, 4> Regs;
9497           MachineFunction &MF = DAG.getMachineFunction();
9498           MachineRegisterInfo &MRI = MF.getRegInfo();
9499           const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9500           auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
9501           Register TiedReg = R->getReg();
9502           MVT RegVT = R->getSimpleValueType(0);
9503           const TargetRegisterClass *RC =
9504               TiedReg.isVirtual()     ? MRI.getRegClass(TiedReg)
9505               : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
9506                                       : TRI.getMinimalPhysRegClass(TiedReg);
9507           for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i)
9508             Regs.push_back(MRI.createVirtualRegister(RC));
9509 
9510           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
9511 
9512           SDLoc dl = getCurSDLoc();
9513           // Use MatchedRegs to copy the input value into the new registers.
9514           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, &Call);
9515           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, true,
9516                                            OpInfo.getMatchedOperand(), dl, DAG,
9517                                            AsmNodeOperands);
9518           break;
9519         }
9520 
9521         assert(Flag.isMemKind() && "Unknown matching constraint!");
9522         assert(Flag.getNumOperandRegisters() == 1 &&
9523                "Unexpected number of operands");
9524         // Add information to the INLINEASM node to know about this input.
9525         // See InlineAsm.h isUseOperandTiedToDef.
9526         Flag.clearMemConstraint();
9527         Flag.setMatchingOp(OpInfo.getMatchedOperand());
9528         AsmNodeOperands.push_back(DAG.getTargetConstant(
9529             Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9530         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
9531         break;
9532       }
9533 
9534       // Treat indirect 'X' constraint as memory.
9535       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
9536           OpInfo.isIndirect)
9537         OpInfo.ConstraintType = TargetLowering::C_Memory;
9538 
9539       if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
9540           OpInfo.ConstraintType == TargetLowering::C_Other) {
9541         std::vector<SDValue> Ops;
9542         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
9543                                           Ops, DAG);
9544         if (Ops.empty()) {
9545           if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
9546             if (isa<ConstantSDNode>(InOperandVal)) {
9547               emitInlineAsmError(Call, "value out of range for constraint '" +
9548                                            Twine(OpInfo.ConstraintCode) + "'");
9549               return;
9550             }
9551 
9552           emitInlineAsmError(Call,
9553                              "invalid operand for inline asm constraint '" +
9554                                  Twine(OpInfo.ConstraintCode) + "'");
9555           return;
9556         }
9557 
9558         // Add information to the INLINEASM node to know about this input.
9559         InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size());
9560         AsmNodeOperands.push_back(DAG.getTargetConstant(
9561             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9562         llvm::append_range(AsmNodeOperands, Ops);
9563         break;
9564       }
9565 
9566       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9567         assert((OpInfo.isIndirect ||
9568                 OpInfo.ConstraintType != TargetLowering::C_Memory) &&
9569                "Operand must be indirect to be a mem!");
9570         assert(InOperandVal.getValueType() ==
9571                    TLI.getPointerTy(DAG.getDataLayout()) &&
9572                "Memory operands expect pointer values");
9573 
9574         const InlineAsm::ConstraintCode ConstraintID =
9575             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9576         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9577                "Failed to convert memory constraint code to constraint id.");
9578 
9579         // Add information to the INLINEASM node to know about this input.
9580         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9581         ResOpType.setMemConstraint(ConstraintID);
9582         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
9583                                                         getCurSDLoc(),
9584                                                         MVT::i32));
9585         AsmNodeOperands.push_back(InOperandVal);
9586         break;
9587       }
9588 
9589       if (OpInfo.ConstraintType == TargetLowering::C_Address) {
9590         const InlineAsm::ConstraintCode ConstraintID =
9591             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9592         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9593                "Failed to convert memory constraint code to constraint id.");
9594 
9595         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9596 
9597         SDValue AsmOp = InOperandVal;
9598         if (isFunction(InOperandVal)) {
9599           auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
9600           ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1);
9601           AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
9602                                              InOperandVal.getValueType(),
9603                                              GA->getOffset());
9604         }
9605 
9606         // Add information to the INLINEASM node to know about this input.
9607         ResOpType.setMemConstraint(ConstraintID);
9608 
9609         AsmNodeOperands.push_back(
9610             DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32));
9611 
9612         AsmNodeOperands.push_back(AsmOp);
9613         break;
9614       }
9615 
9616       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
9617               OpInfo.ConstraintType == TargetLowering::C_Register) &&
9618              "Unknown constraint type!");
9619 
9620       // TODO: Support this.
9621       if (OpInfo.isIndirect) {
9622         emitInlineAsmError(
9623             Call, "Don't know how to handle indirect register inputs yet "
9624                   "for constraint '" +
9625                       Twine(OpInfo.ConstraintCode) + "'");
9626         return;
9627       }
9628 
9629       // Copy the input into the appropriate registers.
9630       if (OpInfo.AssignedRegs.Regs.empty()) {
9631         emitInlineAsmError(Call,
9632                            "couldn't allocate input reg for constraint '" +
9633                                Twine(OpInfo.ConstraintCode) + "'");
9634         return;
9635       }
9636 
9637       if (DetectWriteToReservedRegister())
9638         return;
9639 
9640       SDLoc dl = getCurSDLoc();
9641 
9642       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue,
9643                                         &Call);
9644 
9645       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, false,
9646                                                0, dl, DAG, AsmNodeOperands);
9647       break;
9648     }
9649     case InlineAsm::isClobber:
9650       // Add the clobbered value to the operand list, so that the register
9651       // allocator is aware that the physreg got clobbered.
9652       if (!OpInfo.AssignedRegs.Regs.empty())
9653         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::Clobber,
9654                                                  false, 0, getCurSDLoc(), DAG,
9655                                                  AsmNodeOperands);
9656       break;
9657     }
9658   }
9659 
9660   // Finish up input operands.  Set the input chain and add the flag last.
9661   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
9662   if (Glue.getNode()) AsmNodeOperands.push_back(Glue);
9663 
9664   unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
9665   Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
9666                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
9667   Glue = Chain.getValue(1);
9668 
9669   // Do additional work to generate outputs.
9670 
9671   SmallVector<EVT, 1> ResultVTs;
9672   SmallVector<SDValue, 1> ResultValues;
9673   SmallVector<SDValue, 8> OutChains;
9674 
9675   llvm::Type *CallResultType = Call.getType();
9676   ArrayRef<Type *> ResultTypes;
9677   if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
9678     ResultTypes = StructResult->elements();
9679   else if (!CallResultType->isVoidTy())
9680     ResultTypes = ArrayRef(CallResultType);
9681 
9682   auto CurResultType = ResultTypes.begin();
9683   auto handleRegAssign = [&](SDValue V) {
9684     assert(CurResultType != ResultTypes.end() && "Unexpected value");
9685     assert((*CurResultType)->isSized() && "Unexpected unsized type");
9686     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
9687     ++CurResultType;
9688     // If the type of the inline asm call site return value is different but has
9689     // same size as the type of the asm output, bitcast it.  One example of this
9690     // is for vectors with different width / number of elements.  This can
9691     // happen for register classes that can contain multiple different value
9692     // types.  The preg or vreg allocated may not have the same VT as was
9693     // expected.
9694     //
9695     // This can also happen for a return value that disagrees with the register
9696     // class it is put in, eg. a double in a general-purpose register on a
9697     // 32-bit machine.
9698     if (ResultVT != V.getValueType() &&
9699         ResultVT.getSizeInBits() == V.getValueSizeInBits())
9700       V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
9701     else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
9702              V.getValueType().isInteger()) {
9703       // If a result value was tied to an input value, the computed result
9704       // may have a wider width than the expected result.  Extract the
9705       // relevant portion.
9706       V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
9707     }
9708     assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
9709     ResultVTs.push_back(ResultVT);
9710     ResultValues.push_back(V);
9711   };
9712 
9713   // Deal with output operands.
9714   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9715     if (OpInfo.Type == InlineAsm::isOutput) {
9716       SDValue Val;
9717       // Skip trivial output operands.
9718       if (OpInfo.AssignedRegs.Regs.empty())
9719         continue;
9720 
9721       switch (OpInfo.ConstraintType) {
9722       case TargetLowering::C_Register:
9723       case TargetLowering::C_RegisterClass:
9724         Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
9725                                                   Chain, &Glue, &Call);
9726         break;
9727       case TargetLowering::C_Immediate:
9728       case TargetLowering::C_Other:
9729         Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
9730                                               OpInfo, DAG);
9731         break;
9732       case TargetLowering::C_Memory:
9733         break; // Already handled.
9734       case TargetLowering::C_Address:
9735         break; // Silence warning.
9736       case TargetLowering::C_Unknown:
9737         assert(false && "Unexpected unknown constraint");
9738       }
9739 
9740       // Indirect outputs manifest as stores. Record the output chains.
9741       if (OpInfo.isIndirect) {
9742         const Value *Ptr = OpInfo.CallOperandVal;
9743         assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
9744         SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
9745                                      MachinePointerInfo(Ptr));
9746         OutChains.push_back(Store);
9747       } else {
9748         // generate CopyFromRegs to associated registers.
9749         assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
9750         if (Val.getOpcode() == ISD::MERGE_VALUES) {
9751           for (const SDValue &V : Val->op_values())
9752             handleRegAssign(V);
9753         } else
9754           handleRegAssign(Val);
9755       }
9756     }
9757   }
9758 
9759   // Set results.
9760   if (!ResultValues.empty()) {
9761     assert(CurResultType == ResultTypes.end() &&
9762            "Mismatch in number of ResultTypes");
9763     assert(ResultValues.size() == ResultTypes.size() &&
9764            "Mismatch in number of output operands in asm result");
9765 
9766     SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
9767                             DAG.getVTList(ResultVTs), ResultValues);
9768     setValue(&Call, V);
9769   }
9770 
9771   // Collect store chains.
9772   if (!OutChains.empty())
9773     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
9774 
9775   if (EmitEHLabels) {
9776     Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
9777   }
9778 
9779   // Only update the root if the inline assembly has a memory effect.
9780   if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
9781       EmitEHLabels)
9782     DAG.setRoot(Chain);
9783 }
9784 
9785 void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
9786                                              const Twine &Message) {
9787   LLVMContext &Ctx = *DAG.getContext();
9788   Ctx.emitError(&Call, Message);
9789 
9790   // Make sure we leave the DAG in a valid state
9791   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9792   SmallVector<EVT, 1> ValueVTs;
9793   ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
9794 
9795   if (ValueVTs.empty())
9796     return;
9797 
9798   SmallVector<SDValue, 1> Ops;
9799   for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
9800     Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
9801 
9802   setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
9803 }
9804 
9805 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
9806   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
9807                           MVT::Other, getRoot(),
9808                           getValue(I.getArgOperand(0)),
9809                           DAG.getSrcValue(I.getArgOperand(0))));
9810 }
9811 
9812 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
9813   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9814   const DataLayout &DL = DAG.getDataLayout();
9815   SDValue V = DAG.getVAArg(
9816       TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
9817       getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
9818       DL.getABITypeAlign(I.getType()).value());
9819   DAG.setRoot(V.getValue(1));
9820 
9821   if (I.getType()->isPointerTy())
9822     V = DAG.getPtrExtOrTrunc(
9823         V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
9824   setValue(&I, V);
9825 }
9826 
9827 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
9828   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
9829                           MVT::Other, getRoot(),
9830                           getValue(I.getArgOperand(0)),
9831                           DAG.getSrcValue(I.getArgOperand(0))));
9832 }
9833 
9834 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
9835   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
9836                           MVT::Other, getRoot(),
9837                           getValue(I.getArgOperand(0)),
9838                           getValue(I.getArgOperand(1)),
9839                           DAG.getSrcValue(I.getArgOperand(0)),
9840                           DAG.getSrcValue(I.getArgOperand(1))));
9841 }
9842 
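// (Illustrative) Given !range !{i64 0, i64 256} on an instruction producing
// an i64, the unsigned range fits in 8 bits, so the result is wrapped in an
// AssertZext with value type i8 to record the known-zero high bits.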
9843 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
9844                                                     const Instruction &I,
9845                                                     SDValue Op) {
9846   const MDNode *Range = getRangeMetadata(I);
9847   if (!Range)
9848     return Op;
9849 
9850   ConstantRange CR = getConstantRangeFromMetadata(*Range);
9851   if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
9852     return Op;
9853 
9854   APInt Lo = CR.getUnsignedMin();
9855   if (!Lo.isMinValue())
9856     return Op;
9857 
9858   APInt Hi = CR.getUnsignedMax();
9859   unsigned Bits = std::max(Hi.getActiveBits(),
9860                            static_cast<unsigned>(IntegerType::MIN_INT_BITS));
9861 
9862   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
9863 
9864   SDLoc SL = getCurSDLoc();
9865 
9866   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
9867                              DAG.getValueType(SmallVT));
9868   unsigned NumVals = Op.getNode()->getNumValues();
9869   if (NumVals == 1)
9870     return ZExt;
9871 
9872   SmallVector<SDValue, 4> Ops;
9873 
9874   Ops.push_back(ZExt);
9875   for (unsigned I = 1; I != NumVals; ++I)
9876     Ops.push_back(Op.getValue(I));
9877 
9878   return DAG.getMergeValues(Ops, SL);
9879 }
9880 
9881 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
9882 /// the call being lowered.
9883 ///
9884 /// This is a helper for lowering intrinsics that follow a target calling
9885 /// convention or require stack pointer adjustment. Only a subset of the
9886 /// intrinsic's operands need to participate in the calling convention.
9887 void SelectionDAGBuilder::populateCallLoweringInfo(
9888     TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
9889     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
9890     AttributeSet RetAttrs, bool IsPatchPoint) {
9891   TargetLowering::ArgListTy Args;
9892   Args.reserve(NumArgs);
9893 
9894   // Populate the argument list.
9895   // Attributes for args start at offset 1, after the return attribute.
9896   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
9897        ArgI != ArgE; ++ArgI) {
9898     const Value *V = Call->getOperand(ArgI);
9899 
9900     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
9901 
9902     TargetLowering::ArgListEntry Entry;
9903     Entry.Node = getValue(V);
9904     Entry.Ty = V->getType();
9905     Entry.setAttributes(Call, ArgI);
9906     Args.push_back(Entry);
9907   }
9908 
9909   CLI.setDebugLoc(getCurSDLoc())
9910       .setChain(getRoot())
9911       .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
9912                  RetAttrs)
9913       .setDiscardResult(Call->use_empty())
9914       .setIsPatchPoint(IsPatchPoint)
9915       .setIsPreallocated(
9916           Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
9917 }
9918 
9919 /// Add a stack map intrinsic call's live variable operands to a stackmap
9920 /// or patchpoint target node's operand list.
9921 ///
9922 /// Constants are converted to TargetConstants purely as an optimization to
9923 /// avoid constant materialization and register allocation.
9924 ///
9925 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
9926 /// generate address computation nodes, and so FinalizeISel can convert the
9927 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
9928 /// address materialization and register allocation, but may also be required
9929 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
9930 /// alloca in the entry block, then the runtime may assume that the alloca's
9931 /// StackMap location can be read immediately after compilation and that the
9932 /// location is valid at any point during execution (this is similar to the
9933 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
9934 /// only available in a register, then the runtime would need to trap when
9935 /// execution reaches the StackMap in order to read the alloca's location.
9936 static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
9937                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
9938                                 SelectionDAGBuilder &Builder) {
9939   SelectionDAG &DAG = Builder.DAG;
9940   for (unsigned I = StartIdx; I < Call.arg_size(); I++) {
9941     SDValue Op = Builder.getValue(Call.getArgOperand(I));
9942 
9943     // Things on the stack are pointer-typed, meaning that they are already
9944     // legal and can be emitted directly to target nodes.
9945     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
9946       Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
9947     } else {
9948       // Otherwise emit a target independent node to be legalised.
9949       Ops.push_back(Op);
9950     }
9951   }
9952 }
9953 
9954 /// Lower llvm.experimental.stackmap.
9955 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
9956   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
9957   //                                  [live variables...])
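  //
  // (Illustrative) e.g.
  //   call void (i64, i32, ...)
  //       @llvm.experimental.stackmap(i64 42, i32 4, ptr %obj)
  // records %obj's location under ID 42 and reserves 4 shadow bytes of nops.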
9958 
9959   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
9960 
9961   SDValue Chain, InGlue, Callee;
9962   SmallVector<SDValue, 32> Ops;
9963 
9964   SDLoc DL = getCurSDLoc();
9965   Callee = getValue(CI.getCalledOperand());
9966 
9967   // The stackmap intrinsic only records the live variables (the arguments
9968   // passed to it) and emits NOPS (if requested). Unlike the patchpoint
9969   // intrinsic, this won't be lowered to a function call. This means we don't
9970   // have to worry about calling conventions and target specific lowering code.
9971   // Instead we perform the call lowering right here.
9972   //
9973   // chain, flag = CALLSEQ_START(chain, 0, 0)
9974   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
9975   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
9976   //
9977   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
9978   InGlue = Chain.getValue(1);
9979 
9980   // Add the STACKMAP operands, starting with DAG house-keeping.
9981   Ops.push_back(Chain);
9982   Ops.push_back(InGlue);
9983 
9984   // Add the <id>, <numShadowBytes> operands.
9985   //
9986   // These do not require legalisation, and can be emitted directly to target
9987   // constant nodes.
9988   SDValue ID = getValue(CI.getArgOperand(0));
9989   assert(ID.getValueType() == MVT::i64);
9990   SDValue IDConst = DAG.getTargetConstant(
9991       cast<ConstantSDNode>(ID)->getZExtValue(), DL, ID.getValueType());
9992   Ops.push_back(IDConst);
9993 
9994   SDValue Shad = getValue(CI.getArgOperand(1));
9995   assert(Shad.getValueType() == MVT::i32);
9996   SDValue ShadConst = DAG.getTargetConstant(
9997       cast<ConstantSDNode>(Shad)->getZExtValue(), DL, Shad.getValueType());
9998   Ops.push_back(ShadConst);
9999 
10000   // Add the live variables.
10001   addStackMapLiveVars(CI, 2, DL, Ops, *this);
10002 
10003   // Create the STACKMAP node.
10004   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10005   Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
10006   InGlue = Chain.getValue(1);
10007 
10008   Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
10009 
10010   // Stackmaps don't generate values, so nothing goes into the NodeMap.
10011 
10012   // Set the root to the target-lowered call chain.
10013   DAG.setRoot(Chain);
10014 
10015   // Inform the Frame Information that we have a stackmap in this function.
10016   FuncInfo.MF->getFrameInfo().setHasStackMap();
10017 }
10018 
10019 /// Lower llvm.experimental.patchpoint directly to its target opcode.
10020 void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
10021                                           const BasicBlock *EHPadBB) {
10022   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
10023   //                                                 i32 <numBytes>,
10024   //                                                 i8* <target>,
10025   //                                                 i32 <numArgs>,
10026   //                                                 [Args...],
10027   //                                                 [live variables...])
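  //
  // (Illustrative) e.g.
  //   call void (i64, i32, ptr, i32, ...)
  //       @llvm.experimental.patchpoint.void(i64 7, i32 15, ptr @f,
  //                                          i32 2, i64 %a, i64 %b)
  // calls @f(%a, %b) and reserves 15 patchable bytes under ID 7.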
10028 
10029   CallingConv::ID CC = CB.getCallingConv();
10030   bool IsAnyRegCC = CC == CallingConv::AnyReg;
10031   bool HasDef = !CB.getType()->isVoidTy();
10032   SDLoc dl = getCurSDLoc();
10033   SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
10034 
10035   // Handle immediate and symbolic callees.
10036   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10037     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10038                                    /*isTarget=*/true);
10039   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10040     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10041                                          SDLoc(SymbolicCallee),
10042                                          SymbolicCallee->getValueType(0));
10043 
10044   // Get the real number of arguments participating in the call <numArgs>
10045   SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
10046   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
10047 
10048   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
10049   // Intrinsics include all meta-operands up to but not including CC.
10050   unsigned NumMetaOpers = PatchPointOpers::CCPos;
10051   assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
10052          "Not enough arguments provided to the patchpoint intrinsic");
10053 
10054   // For AnyRegCC the arguments are lowered later on manually.
10055   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10056   Type *ReturnTy =
10057       IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
10058 
10059   TargetLowering::CallLoweringInfo CLI(DAG);
10060   populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
10061                            ReturnTy, CB.getAttributes().getRetAttrs(), true);
10062   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
10063 
10064   SDNode *CallEnd = Result.second.getNode();
10065   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
10066     CallEnd = CallEnd->getOperand(0).getNode();
10067 
10068   /// Get a call instruction from the call sequence chain.
10069   /// Tail calls are not allowed.
10070   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
10071          "Expected a callseq node.");
10072   SDNode *Call = CallEnd->getOperand(0).getNode();
10073   bool HasGlue = Call->getGluedNode();
10074 
10075   // Replace the target specific call node with the patchable intrinsic.
10076   SmallVector<SDValue, 8> Ops;
10077 
10078   // Push the chain.
10079   Ops.push_back(*(Call->op_begin()));
10080 
10081   // Optionally, push the glue (if any).
10082   if (HasGlue)
10083     Ops.push_back(*(Call->op_end() - 1));
10084 
10085   // Push the register mask info.
10086   if (HasGlue)
10087     Ops.push_back(*(Call->op_end() - 2));
10088   else
10089     Ops.push_back(*(Call->op_end() - 1));
10090 
10091   // Add the <id> and <numBytes> constants.
10092   SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
10093   Ops.push_back(DAG.getTargetConstant(
10094                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
10095   SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
10096   Ops.push_back(DAG.getTargetConstant(
10097                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
10098                   MVT::i32));
10099 
10100   // Add the callee.
10101   Ops.push_back(Callee);
10102 
10103   // Adjust <numArgs> to account for any arguments that have been passed on the
10104   // stack instead.
10105   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
10106   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
10107   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10108   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10109 
10110   // Add the calling convention
10111   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
10112 
10113   // Add the arguments we omitted previously. The register allocator should
10114   // place these in any free register.
10115   if (IsAnyRegCC)
10116     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
10117       Ops.push_back(getValue(CB.getArgOperand(i)));
10118 
10119   // Push the arguments from the call instruction.
10120   SDNode::op_iterator e = HasGlue ? Call->op_end() - 2 : Call->op_end() - 1;
10121   Ops.append(Call->op_begin() + 2, e);
10122 
10123   // Push live variables for the stack map.
10124   addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
10125 
10126   SDVTList NodeTys;
10127   if (IsAnyRegCC && HasDef) {
10128     // Create the return types based on the intrinsic definition
10129     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10130     SmallVector<EVT, 3> ValueVTs;
10131     ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
10132     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
10133 
10134     // There is always a chain and a glue type at the end
10135     ValueVTs.push_back(MVT::Other);
10136     ValueVTs.push_back(MVT::Glue);
10137     NodeTys = DAG.getVTList(ValueVTs);
10138   } else
10139     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10140 
10141   // Replace the target specific call node with a PATCHPOINT node.
10142   SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);
10143 
10144   // Update the NodeMap.
10145   if (HasDef) {
10146     if (IsAnyRegCC)
10147       setValue(&CB, SDValue(PPV.getNode(), 0));
10148     else
10149       setValue(&CB, Result.first);
10150   }
10151 
10152   // Fix up the consumers of the intrinsic. The chain and glue may be used in the
10153   // call sequence. Furthermore the location of the chain and glue can change
10154   // when the AnyReg calling convention is used and the intrinsic returns a
10155   // value.
10156   if (IsAnyRegCC && HasDef) {
10157     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
10158     SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
10159     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10160   } else
10161     DAG.ReplaceAllUsesWith(Call, PPV.getNode());
10162   DAG.DeleteNode(Call);
10163 
10164   // Inform the Frame Information that we have a patchpoint in this function.
10165   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10166 }
10167 
10168 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
10169                                             unsigned Intrinsic) {
10170   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10171   SDValue Op1 = getValue(I.getArgOperand(0));
10172   SDValue Op2;
10173   if (I.arg_size() > 1)
10174     Op2 = getValue(I.getArgOperand(1));
10175   SDLoc dl = getCurSDLoc();
10176   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
10177   SDValue Res;
10178   SDNodeFlags SDFlags;
10179   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
10180     SDFlags.copyFMF(*FPMO);
10181 
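        // For example (illustrative), a reassociating reduction such as
        //   %r = call reassoc float @llvm.vector.reduce.fadd.v4f32(
        //            float %acc, <4 x float> %v)
        // can be lowered to FADD(%acc, VECREDUCE_FADD(%v)), whereas without
        // 'reassoc' the in-order VECREDUCE_SEQ_FADD(%acc, %v) form is used.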
10182   switch (Intrinsic) {
10183   case Intrinsic::vector_reduce_fadd:
10184     if (SDFlags.hasAllowReassociation())
10185       Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
10186                         DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10187                         SDFlags);
10188     else
10189       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10190     break;
10191   case Intrinsic::vector_reduce_fmul:
10192     if (SDFlags.hasAllowReassociation())
10193       Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
10194                         DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10195                         SDFlags);
10196     else
10197       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10198     break;
10199   case Intrinsic::vector_reduce_add:
10200     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10201     break;
10202   case Intrinsic::vector_reduce_mul:
10203     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10204     break;
10205   case Intrinsic::vector_reduce_and:
10206     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10207     break;
10208   case Intrinsic::vector_reduce_or:
10209     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10210     break;
10211   case Intrinsic::vector_reduce_xor:
10212     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10213     break;
10214   case Intrinsic::vector_reduce_smax:
10215     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10216     break;
10217   case Intrinsic::vector_reduce_smin:
10218     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10219     break;
10220   case Intrinsic::vector_reduce_umax:
10221     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10222     break;
10223   case Intrinsic::vector_reduce_umin:
10224     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10225     break;
10226   case Intrinsic::vector_reduce_fmax:
10227     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10228     break;
10229   case Intrinsic::vector_reduce_fmin:
10230     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10231     break;
10232   case Intrinsic::vector_reduce_fmaximum:
10233     Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
10234     break;
10235   case Intrinsic::vector_reduce_fminimum:
10236     Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
10237     break;
10238   default:
10239     llvm_unreachable("Unhandled vector reduce intrinsic");
10240   }
10241   setValue(&I, Res);
10242 }
10243 
10244 /// Returns an AttributeList representing the attributes applied to the return
10245 /// value of the given call.
10246 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
10247   SmallVector<Attribute::AttrKind, 2> Attrs;
10248   if (CLI.RetSExt)
10249     Attrs.push_back(Attribute::SExt);
10250   if (CLI.RetZExt)
10251     Attrs.push_back(Attribute::ZExt);
10252   if (CLI.IsInReg)
10253     Attrs.push_back(Attribute::InReg);
10254 
10255   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
10256                             Attrs);
10257 }
10258 
10259 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
10260 /// implementation, which just calls LowerCall.
10261 /// FIXME: When all targets are migrated to using LowerCall, this hook
10262 /// should be integrated into SDISel.
10263 std::pair<SDValue, SDValue>
10264 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
10265   // Handle the incoming return values from the call.
10266   CLI.Ins.clear();
10267   Type *OrigRetTy = CLI.RetTy;
10268   SmallVector<EVT, 4> RetTys;
10269   SmallVector<uint64_t, 4> Offsets;
10270   auto &DL = CLI.DAG.getDataLayout();
10271   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets, 0);
10272 
10273   if (CLI.IsPostTypeLegalization) {
10274     // If we are lowering a libcall after legalization, split the return type.
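          // E.g. (illustrative) an i128 return value on a 64-bit target is
          // split into two i64 register values at byte offsets 0 and 8.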
10275     SmallVector<EVT, 4> OldRetTys;
10276     SmallVector<uint64_t, 4> OldOffsets;
10277     RetTys.swap(OldRetTys);
10278     Offsets.swap(OldOffsets);
10279 
10280     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
10281       EVT RetVT = OldRetTys[i];
10282       uint64_t Offset = OldOffsets[i];
10283       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
10284       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
10285       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
10286       RetTys.append(NumRegs, RegisterVT);
10287       for (unsigned j = 0; j != NumRegs; ++j)
10288         Offsets.push_back(Offset + j * RegisterVTByteSZ);
10289     }
10290   }
10291 
10292   SmallVector<ISD::OutputArg, 4> Outs;
10293   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
10294 
10295   bool CanLowerReturn =
10296       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
10297                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
10298 
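        // Illustrative example: a call whose aggregate return type cannot be
        // lowered in registers, such as
        //   %r = call [8 x i64] @f()
        // is demoted below to pass a hidden pointer to a stack temporary,
        //   call void @f(ptr sret([8 x i64]) %tmp)
        // after which the result is reloaded from that temporary.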
10299   SDValue DemoteStackSlot;
10300   int DemoteStackIdx = -100;
10301   if (!CanLowerReturn) {
10302     // FIXME: equivalent assert?
10303     // assert(!CS.hasInAllocaArgument() &&
10304     //        "sret demotion is incompatible with inalloca");
10305     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
10306     Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
10307     MachineFunction &MF = CLI.DAG.getMachineFunction();
10308     DemoteStackIdx =
10309         MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
10310     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
10311                                               DL.getAllocaAddrSpace());
10312 
10313     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
10314     ArgListEntry Entry;
10315     Entry.Node = DemoteStackSlot;
10316     Entry.Ty = StackSlotPtrType;
10317     Entry.IsSExt = false;
10318     Entry.IsZExt = false;
10319     Entry.IsInReg = false;
10320     Entry.IsSRet = true;
10321     Entry.IsNest = false;
10322     Entry.IsByVal = false;
10323     Entry.IsByRef = false;
10324     Entry.IsReturned = false;
10325     Entry.IsSwiftSelf = false;
10326     Entry.IsSwiftAsync = false;
10327     Entry.IsSwiftError = false;
10328     Entry.IsCFGuardTarget = false;
10329     Entry.Alignment = Alignment;
10330     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
10331     CLI.NumFixedArgs += 1;
10332     CLI.getArgs()[0].IndirectType = CLI.RetTy;
10333     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
10334 
10335     // sret demotion isn't compatible with tail-calls, since the sret argument
10336     // points into the caller's stack frame.
10337     CLI.IsTailCall = false;
10338   } else {
10339     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10340         CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
10341     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
10342       ISD::ArgFlagsTy Flags;
10343       if (NeedsRegBlock) {
10344         Flags.setInConsecutiveRegs();
10345         if (I == RetTys.size() - 1)
10346           Flags.setInConsecutiveRegsLast();
10347       }
10348       EVT VT = RetTys[I];
10349       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10350                                                      CLI.CallConv, VT);
10351       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10352                                                        CLI.CallConv, VT);
10353       for (unsigned i = 0; i != NumRegs; ++i) {
10354         ISD::InputArg MyFlags;
10355         MyFlags.Flags = Flags;
10356         MyFlags.VT = RegisterVT;
10357         MyFlags.ArgVT = VT;
10358         MyFlags.Used = CLI.IsReturnValueUsed;
10359         if (CLI.RetTy->isPointerTy()) {
10360           MyFlags.Flags.setPointer();
10361           MyFlags.Flags.setPointerAddrSpace(
10362               cast<PointerType>(CLI.RetTy)->getAddressSpace());
10363         }
10364         if (CLI.RetSExt)
10365           MyFlags.Flags.setSExt();
10366         if (CLI.RetZExt)
10367           MyFlags.Flags.setZExt();
10368         if (CLI.IsInReg)
10369           MyFlags.Flags.setInReg();
10370         CLI.Ins.push_back(MyFlags);
10371       }
10372     }
10373   }
10374 
10375   // We push in swifterror return as the last element of CLI.Ins.
10376   ArgListTy &Args = CLI.getArgs();
10377   if (supportSwiftError()) {
10378     for (const ArgListEntry &Arg : Args) {
10379       if (Arg.IsSwiftError) {
10380         ISD::InputArg MyFlags;
10381         MyFlags.VT = getPointerTy(DL);
10382         MyFlags.ArgVT = EVT(getPointerTy(DL));
10383         MyFlags.Flags.setSwiftError();
10384         CLI.Ins.push_back(MyFlags);
10385       }
10386     }
10387   }
10388 
10389   // Handle all of the outgoing arguments.
10390   CLI.Outs.clear();
10391   CLI.OutVals.clear();
10392   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
10393     SmallVector<EVT, 4> ValueVTs;
10394     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
10395     // FIXME: Split arguments if CLI.IsPostTypeLegalization
10396     Type *FinalType = Args[i].Ty;
10397     if (Args[i].IsByVal)
10398       FinalType = Args[i].IndirectType;
10399     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10400         FinalType, CLI.CallConv, CLI.IsVarArg, DL);
10401     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
10402          ++Value) {
10403       EVT VT = ValueVTs[Value];
10404       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
10405       SDValue Op = SDValue(Args[i].Node.getNode(),
10406                            Args[i].Node.getResNo() + Value);
10407       ISD::ArgFlagsTy Flags;
10408 
10409       // Certain targets (such as MIPS) may have a different ABI alignment
10410       // for a type depending on the context. Give the target a chance to
10411       // specify the alignment it wants.
10412       const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
10413       Flags.setOrigAlign(OriginalAlignment);
10414 
10415       if (Args[i].Ty->isPointerTy()) {
10416         Flags.setPointer();
10417         Flags.setPointerAddrSpace(
10418             cast<PointerType>(Args[i].Ty)->getAddressSpace());
10419       }
10420       if (Args[i].IsZExt)
10421         Flags.setZExt();
10422       if (Args[i].IsSExt)
10423         Flags.setSExt();
10424       if (Args[i].IsInReg) {
10425         // If we are using the vectorcall calling convention, a structure that
10426         // is passed InReg is surely an HVA (homogeneous vector aggregate).
10427         if (CLI.CallConv == CallingConv::X86_VectorCall &&
10428             isa<StructType>(FinalType)) {
10429           // The first value of a structure is marked as HvaStart.
10430           if (0 == Value)
10431             Flags.setHvaStart();
10432           Flags.setHva();
10433         }
10434         // Set InReg Flag
10435         Flags.setInReg();
10436       }
10437       if (Args[i].IsSRet)
10438         Flags.setSRet();
10439       if (Args[i].IsSwiftSelf)
10440         Flags.setSwiftSelf();
10441       if (Args[i].IsSwiftAsync)
10442         Flags.setSwiftAsync();
10443       if (Args[i].IsSwiftError)
10444         Flags.setSwiftError();
10445       if (Args[i].IsCFGuardTarget)
10446         Flags.setCFGuardTarget();
10447       if (Args[i].IsByVal)
10448         Flags.setByVal();
10449       if (Args[i].IsByRef)
10450         Flags.setByRef();
10451       if (Args[i].IsPreallocated) {
10452         Flags.setPreallocated();
10453         // Set the byval flag for CCAssignFn callbacks that don't know about
10454         // preallocated.  This way we can know how many bytes we should've
10455         // allocated and how many bytes a callee cleanup function will pop.  If
10456         // we port preallocated to more targets, we'll have to add custom
10457         // preallocated handling in the various CC lowering callbacks.
10458         Flags.setByVal();
10459       }
10460       if (Args[i].IsInAlloca) {
10461         Flags.setInAlloca();
10462         // Set the byval flag for CCAssignFn callbacks that don't know about
10463         // inalloca.  This way we can know how many bytes we should've allocated
10464         // and how many bytes a callee cleanup function will pop.  If we port
10465         // inalloca to more targets, we'll have to add custom inalloca handling
10466         // in the various CC lowering callbacks.
10467         Flags.setByVal();
10468       }
10469       Align MemAlign;
10470       if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
10471         unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
10472         Flags.setByValSize(FrameSize);
10473         // For in-memory arguments, size and alignment should be passed
10474         // from the FE; the BE guesses when missing but can get it wrong.
10475         if (auto MA = Args[i].Alignment)
10476           MemAlign = *MA;
10477         else
10478           MemAlign = Align(getByValTypeAlignment(Args[i].IndirectType, DL));
10479       } else if (auto MA = Args[i].Alignment) {
10480         MemAlign = *MA;
10481       } else {
10482         MemAlign = OriginalAlignment;
10483       }
10484       Flags.setMemAlign(MemAlign);
10485       if (Args[i].IsNest)
10486         Flags.setNest();
10487       if (NeedsRegBlock)
10488         Flags.setInConsecutiveRegs();
10489 
10490       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10491                                                  CLI.CallConv, VT);
10492       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10493                                                         CLI.CallConv, VT);
10494       SmallVector<SDValue, 4> Parts(NumParts);
10495       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
10496 
10497       if (Args[i].IsSExt)
10498         ExtendKind = ISD::SIGN_EXTEND;
10499       else if (Args[i].IsZExt)
10500         ExtendKind = ISD::ZERO_EXTEND;
10501 
10502       // Conservatively only handle 'returned' on non-vectors that can be
10503       // lowered, for now.
10504       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
10505           CanLowerReturn) {
10506         assert((CLI.RetTy == Args[i].Ty ||
10507                 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
10508                  CLI.RetTy->getPointerAddressSpace() ==
10509                      Args[i].Ty->getPointerAddressSpace())) &&
10510                RetTys.size() == NumValues && "unexpected use of 'returned'");
10511         // Before passing 'returned' to the target lowering code, ensure that
10512         // either the register MVT and the actual EVT are the same size or that
10513         // the return value and argument are extended in the same way; in these
10514         // cases it's safe to pass the argument register value unchanged as the
10515         // return register value (although it's at the target's option whether
10516         // to do so)
10517         // TODO: allow code generation to take advantage of partially preserved
10518         // registers rather than clobbering the entire register when the
10519         // parameter extension method is not compatible with the return
10520         // extension method
10521         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
10522             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
10523              CLI.RetZExt == Args[i].IsZExt))
10524           Flags.setReturned();
10525       }
10526 
10527       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
10528                      CLI.CallConv, ExtendKind);
10529 
10530       for (unsigned j = 0; j != NumParts; ++j) {
10531         // If it isn't the first piece, the alignment must be 1.
10532         // For scalable vectors, the scalable part is currently handled
10533         // by individual targets, so we just use the known minimum size here.
10534         ISD::OutputArg MyFlags(
10535             Flags, Parts[j].getValueType().getSimpleVT(), VT,
10536             i < CLI.NumFixedArgs, i,
10537             j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
10538         if (NumParts > 1 && j == 0)
10539           MyFlags.Flags.setSplit();
10540         else if (j != 0) {
10541           MyFlags.Flags.setOrigAlign(Align(1));
10542           if (j == NumParts - 1)
10543             MyFlags.Flags.setSplitEnd();
10544         }
10545 
10546         CLI.Outs.push_back(MyFlags);
10547         CLI.OutVals.push_back(Parts[j]);
10548       }
10549 
10550       if (NeedsRegBlock && Value == NumValues - 1)
10551         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
10552     }
10553   }
10554 
10555   SmallVector<SDValue, 4> InVals;
10556   CLI.Chain = LowerCall(CLI, InVals);
10557 
10558   // Update CLI.InVals for use outside of this function.
10559   CLI.InVals = InVals;
10560 
10561   // Verify that the target's LowerCall behaved as expected.
10562   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
10563          "LowerCall didn't return a valid chain!");
10564   assert((!CLI.IsTailCall || InVals.empty()) &&
10565          "LowerCall emitted a return value for a tail call!");
10566   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
10567          "LowerCall didn't emit the correct number of values!");
10568 
10569   // For a tail call, the return value is merely live-out and there aren't
10570   // any nodes in the DAG representing it. Return a special value to
10571   // indicate that a tail call has been emitted and no more Instructions
10572   // should be processed in the current block.
10573   if (CLI.IsTailCall) {
10574     CLI.DAG.setRoot(CLI.Chain);
10575     return std::make_pair(SDValue(), SDValue());
10576   }
10577 
10578 #ifndef NDEBUG
10579   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
10580     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
10581     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
10582            "LowerCall emitted a value with the wrong type!");
10583   }
10584 #endif
10585 
10586   SmallVector<SDValue, 4> ReturnValues;
10587   if (!CanLowerReturn) {
10588     // The instruction result is the result of loading from the
10589     // hidden sret parameter.
10590     SmallVector<EVT, 1> PVTs;
10591     Type *PtrRetTy =
10592         PointerType::get(OrigRetTy->getContext(), DL.getAllocaAddrSpace());
10593 
10594     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
10595     assert(PVTs.size() == 1 && "Pointers should fit in one register");
10596     EVT PtrVT = PVTs[0];
10597 
10598     unsigned NumValues = RetTys.size();
10599     ReturnValues.resize(NumValues);
10600     SmallVector<SDValue, 4> Chains(NumValues);
10601 
10602     // An aggregate return value cannot wrap around the address space, so
10603     // offsets to its parts don't wrap either.
10604     SDNodeFlags Flags;
10605     Flags.setNoUnsignedWrap(true);
10606 
10607     MachineFunction &MF = CLI.DAG.getMachineFunction();
10608     Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
10609     for (unsigned i = 0; i < NumValues; ++i) {
10610       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
10611                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
10612                                                         PtrVT), Flags);
10613       SDValue L = CLI.DAG.getLoad(
10614           RetTys[i], CLI.DL, CLI.Chain, Add,
10615           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
10616                                             DemoteStackIdx, Offsets[i]),
10617           HiddenSRetAlign);
10618       ReturnValues[i] = L;
10619       Chains[i] = L.getValue(1);
10620     }
10621 
10622     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
10623   } else {
10624     // Collect the legal value parts into potentially illegal values
10625     // that correspond to the original function's return values.
10626     std::optional<ISD::NodeType> AssertOp;
10627     if (CLI.RetSExt)
10628       AssertOp = ISD::AssertSext;
10629     else if (CLI.RetZExt)
10630       AssertOp = ISD::AssertZext;
10631     unsigned CurReg = 0;
10632     for (EVT VT : RetTys) {
10633       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10634                                                      CLI.CallConv, VT);
10635       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10636                                                        CLI.CallConv, VT);
10637 
10638       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
10639                                               NumRegs, RegisterVT, VT, nullptr,
10640                                               CLI.CallConv, AssertOp));
10641       CurReg += NumRegs;
10642     }
10643 
10644     // For a function returning void, there is no return value. We can't create
10645     // such a node, so we just return a null return value in that case; nothing
10646     // will actually look at the value.
10647     if (ReturnValues.empty())
10648       return std::make_pair(SDValue(), CLI.Chain);
10649   }
10650 
10651   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
10652                                 CLI.DAG.getVTList(RetTys), ReturnValues);
10653   return std::make_pair(Res, CLI.Chain);
10654 }
10655 
10656 /// Places new result values for the node in Results (their number
10657 /// and types must exactly match those of the original return values of
10658 /// the node), or leaves Results empty, which indicates that the node is not
10659 /// to be custom lowered after all.
10660 void TargetLowering::LowerOperationWrapper(SDNode *N,
10661                                            SmallVectorImpl<SDValue> &Results,
10662                                            SelectionDAG &DAG) const {
10663   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
10664 
10665   if (!Res.getNode())
10666     return;
10667 
10668   // If the original node has one result, take the return value from
10669   // LowerOperation as is. It might not be result number 0.
10670   if (N->getNumValues() == 1) {
10671     Results.push_back(Res);
10672     return;
10673   }
10674 
10675   // If the original node has multiple results, then the return node should
10676   // have the same number of results.
10677   assert((N->getNumValues() == Res->getNumValues()) &&
10678       "Lowering returned the wrong number of results!");
10679 
10680   // Places the new result values based on N's result numbers.
10681   for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
10682     Results.push_back(Res.getValue(I));
10683 }
10684 
10685 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10686   llvm_unreachable("LowerOperation not implemented for this target!");
10687 }
10688 
10689 void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V,
10690                                                      unsigned Reg,
10691                                                      ISD::NodeType ExtendType) {
10692   SDValue Op = getNonRegisterValue(V);
10693   assert((Op.getOpcode() != ISD::CopyFromReg ||
10694           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
10695          "Copy from a reg to the same reg!");
10696   assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
10697 
10698   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10699   // If this is an InlineAsm, we have to match the registers required, not the
10700   // notional registers required by the type.
10701 
10702   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
10703                    std::nullopt); // This is not an ABI copy.
10704   SDValue Chain = DAG.getEntryNode();
10705 
10706   if (ExtendType == ISD::ANY_EXTEND) {
10707     auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
10708     if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
10709       ExtendType = PreferredExtendIt->second;
10710   }
10711   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
10712   PendingExports.push_back(Chain);
10713 }
10714 
10715 #include "llvm/CodeGen/SelectionDAGISel.h"
10716 
10717 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
10718 /// entry block, return true.  This includes arguments used by switches, since
10719 /// the switch may expand into multiple basic blocks.
10720 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
10721   // With FastISel active, we may be splitting blocks, so force creation
10722   // of virtual registers for all non-dead arguments.
10723   if (FastISel)
10724     return A->use_empty();
10725 
10726   const BasicBlock &Entry = A->getParent()->front();
10727   for (const User *U : A->users())
10728     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
10729       return false;  // Use not in entry block.
10730 
10731   return true;
10732 }
10733 
10734 using ArgCopyElisionMapTy =
10735     DenseMap<const Argument *,
10736              std::pair<const AllocaInst *, const StoreInst *>>;
10737 
10738 /// Scan the entry block of the function in FuncInfo for arguments that look
10739 /// like copies into a local alloca. Record any copied arguments in
10740 /// ArgCopyElisionCandidates.
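      /// For example (illustrative), in
      ///   define void @f(i64 %x) {
      ///   entry:
      ///     %x.addr = alloca i64
      ///     store i64 %x, ptr %x.addr
      /// the store fully initializes %x.addr from the argument %x, so the pair
      /// (%x.addr, store) is recorded as a copy elision candidate.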
10741 static void
10742 findArgumentCopyElisionCandidates(const DataLayout &DL,
10743                                   FunctionLoweringInfo *FuncInfo,
10744                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
10745   // Record the state of every static alloca used in the entry block. Argument
10746   // allocas are all used in the entry block, so we need approximately as many
10747   // entries as we have arguments.
10748   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
10749   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
10750   unsigned NumArgs = FuncInfo->Fn->arg_size();
10751   StaticAllocas.reserve(NumArgs * 2);
10752 
10753   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
10754     if (!V)
10755       return nullptr;
10756     V = V->stripPointerCasts();
10757     const auto *AI = dyn_cast<AllocaInst>(V);
10758     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
10759       return nullptr;
10760     auto Iter = StaticAllocas.insert({AI, Unknown});
10761     return &Iter.first->second;
10762   };
10763 
10764   // Look for stores of arguments to static allocas. Look through bitcasts and
10765   // GEPs to handle type coercions, as long as the alloca is fully initialized
10766   // by the store. Any non-store use of an alloca escapes it and any subsequent
10767   // unanalyzed store might write it.
10768   // FIXME: Handle structs initialized with multiple stores.
10769   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
10770     // Look for stores, and handle non-store uses conservatively.
10771     const auto *SI = dyn_cast<StoreInst>(&I);
10772     if (!SI) {
10773       // We will look through cast uses, so ignore them completely.
10774       if (I.isCast())
10775         continue;
10776       // Ignore debug info and pseudo op intrinsics, they don't escape or store
10777       // to allocas.
10778       if (I.isDebugOrPseudoInst())
10779         continue;
10780       // This is an unknown instruction. Assume it escapes or writes to all
10781       // static alloca operands.
10782       for (const Use &U : I.operands()) {
10783         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
10784           *Info = StaticAllocaInfo::Clobbered;
10785       }
10786       continue;
10787     }
10788 
10789     // If the stored value is a static alloca, mark it as escaped.
10790     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
10791       *Info = StaticAllocaInfo::Clobbered;
10792 
10793     // Check if the destination is a static alloca.
10794     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
10795     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
10796     if (!Info)
10797       continue;
10798     const AllocaInst *AI = cast<AllocaInst>(Dst);
10799 
10800     // Skip allocas that have been initialized or clobbered.
10801     if (*Info != StaticAllocaInfo::Unknown)
10802       continue;
10803 
10804     // Check if the stored value is an argument, and that this store fully
10805     // initializes the alloca.
10806     // If the argument type has padding bits we can't directly forward a pointer
10807     // as the upper bits may contain garbage.
10808     // Don't elide copies from the same argument twice.
10809     const Value *Val = SI->getValueOperand()->stripPointerCasts();
10810     const auto *Arg = dyn_cast<Argument>(Val);
10811     if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
10812         Arg->getType()->isEmptyTy() ||
10813         DL.getTypeStoreSize(Arg->getType()) !=
10814             DL.getTypeAllocSize(AI->getAllocatedType()) ||
10815         !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
10816         ArgCopyElisionCandidates.count(Arg)) {
10817       *Info = StaticAllocaInfo::Clobbered;
10818       continue;
10819     }
10820 
10821     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
10822                       << '\n');
10823 
10824     // Mark this alloca and store for argument copy elision.
10825     *Info = StaticAllocaInfo::Elidable;
10826     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
10827 
10828     // Stop scanning if we've seen all arguments. This will happen early in -O0
10829     // builds, which is useful, because -O0 builds have large entry blocks and
10830     // many allocas.
10831     if (ArgCopyElisionCandidates.size() == NumArgs)
10832       break;
10833   }
10834 }
10835 
10836 /// Try to elide argument copies from memory into a local alloca. Succeeds if
10837 /// ArgVal is a load from a suitable fixed stack object.
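      /// On success, the alloca's frame index is redirected to the argument's
      /// fixed stack object, so the incoming stack slot is reused in place and
      /// the copying store is never emitted.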
10838 static void tryToElideArgumentCopy(
10839     FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
10840     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
10841     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
10842     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
10843     ArrayRef<SDValue> ArgVals, bool &ArgHasUses) {
10844   // Check if this is a load from a fixed stack object.
10845   auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
10846   if (!LNode)
10847     return;
10848   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
10849   if (!FINode)
10850     return;
10851 
10852   // Check that the fixed stack object is the right size and alignment.
10853   // Look at the alignment that the user wrote on the alloca instead of looking
10854   // at the stack object.
10855   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
10856   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
10857   const AllocaInst *AI = ArgCopyIter->second.first;
10858   int FixedIndex = FINode->getIndex();
10859   int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
10860   int OldIndex = AllocaIndex;
10861   MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
10862   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
10863     LLVM_DEBUG(
10864         dbgs() << "  argument copy elision failed due to bad fixed stack "
10865                   "object size\n");
10866     return;
10867   }
10868   Align RequiredAlignment = AI->getAlign();
10869   if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
10870     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
10871                          "greater than stack argument alignment ("
10872                       << DebugStr(RequiredAlignment) << " vs "
10873                       << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
10874     return;
10875   }
10876 
10877   // Perform the elision. Delete the old stack object and replace its only use
10878   // in the variable info map. Mark the stack object as mutable.
10879   LLVM_DEBUG({
10880     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
10881            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
10882            << '\n';
10883   });
10884   MFI.RemoveStackObject(OldIndex);
10885   MFI.setIsImmutableObjectIndex(FixedIndex, false);
10886   AllocaIndex = FixedIndex;
10887   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
10888   for (SDValue ArgVal : ArgVals)
10889     Chains.push_back(ArgVal.getValue(1));
10890 
10891   // Avoid emitting code for the store implementing the copy.
10892   const StoreInst *SI = ArgCopyIter->second.second;
10893   ElidedArgCopyInstrs.insert(SI);
10894 
10895   // Check for uses of the argument again so that we can avoid exporting ArgVal
10896   // if it isn't used by anything other than the store.
10897   for (const Value *U : Arg.users()) {
10898     if (U != SI) {
10899       ArgHasUses = true;
10900       break;
10901     }
10902   }
10903 }
10904 
10905 void SelectionDAGISel::LowerArguments(const Function &F) {
10906   SelectionDAG &DAG = SDB->DAG;
10907   SDLoc dl = SDB->getCurSDLoc();
10908   const DataLayout &DL = DAG.getDataLayout();
10909   SmallVector<ISD::InputArg, 16> Ins;
10910 
10911   // In Naked functions we aren't going to save any registers.
10912   if (F.hasFnAttribute(Attribute::Naked))
10913     return;
10914 
10915   if (!FuncInfo->CanLowerReturn) {
10916     // Put in an sret pointer parameter before all the other parameters.
10917     SmallVector<EVT, 1> ValueVTs;
10918     ComputeValueVTs(*TLI, DAG.getDataLayout(),
10919                     PointerType::get(F.getContext(),
10920                                      DAG.getDataLayout().getAllocaAddrSpace()),
10921                     ValueVTs);
10922 
10923     // NOTE: Assuming that a pointer will never break down to more than one VT
10924     // or one register.
10925     ISD::ArgFlagsTy Flags;
10926     Flags.setSRet();
10927     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
10928     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
10929                          ISD::InputArg::NoArgIndex, 0);
10930     Ins.push_back(RetArg);
10931   }
10932 
10933   // Look for stores of arguments to static allocas. Mark such arguments with a
10934   // flag to ask the target to give us the memory location of that argument if
10935   // available.
10936   ArgCopyElisionMapTy ArgCopyElisionCandidates;
10937   findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
10938                                     ArgCopyElisionCandidates);
10939 
10940   // Set up the incoming argument description vector.
10941   for (const Argument &Arg : F.args()) {
10942     unsigned ArgNo = Arg.getArgNo();
10943     SmallVector<EVT, 4> ValueVTs;
10944     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
10945     bool isArgValueUsed = !Arg.use_empty();
10946     unsigned PartBase = 0;
10947     Type *FinalType = Arg.getType();
10948     if (Arg.hasAttribute(Attribute::ByVal))
10949       FinalType = Arg.getParamByValType();
10950     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
10951         FinalType, F.getCallingConv(), F.isVarArg(), DL);
10952     for (unsigned Value = 0, NumValues = ValueVTs.size();
10953          Value != NumValues; ++Value) {
10954       EVT VT = ValueVTs[Value];
10955       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
10956       ISD::ArgFlagsTy Flags;
10957 
10959       if (Arg.getType()->isPointerTy()) {
10960         Flags.setPointer();
10961         Flags.setPointerAddrSpace(
10962             cast<PointerType>(Arg.getType())->getAddressSpace());
10963       }
10964       if (Arg.hasAttribute(Attribute::ZExt))
10965         Flags.setZExt();
10966       if (Arg.hasAttribute(Attribute::SExt))
10967         Flags.setSExt();
10968       if (Arg.hasAttribute(Attribute::InReg)) {
10969         // If we are using the vectorcall calling convention, a structure that
10970         // is passed InReg is surely an HVA (homogeneous vector aggregate).
10971         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
10972             isa<StructType>(Arg.getType())) {
10973           // The first value of a structure is marked as HvaStart.
10974           if (0 == Value)
10975             Flags.setHvaStart();
10976           Flags.setHva();
10977         }
10978         // Set InReg Flag
10979         Flags.setInReg();
10980       }
10981       if (Arg.hasAttribute(Attribute::StructRet))
10982         Flags.setSRet();
10983       if (Arg.hasAttribute(Attribute::SwiftSelf))
10984         Flags.setSwiftSelf();
10985       if (Arg.hasAttribute(Attribute::SwiftAsync))
10986         Flags.setSwiftAsync();
10987       if (Arg.hasAttribute(Attribute::SwiftError))
10988         Flags.setSwiftError();
10989       if (Arg.hasAttribute(Attribute::ByVal))
10990         Flags.setByVal();
10991       if (Arg.hasAttribute(Attribute::ByRef))
10992         Flags.setByRef();
10993       if (Arg.hasAttribute(Attribute::InAlloca)) {
10994         Flags.setInAlloca();
10995         // Set the byval flag for CCAssignFn callbacks that don't know about
10996         // inalloca.  This way we can know how many bytes we should've allocated
10997         // and how many bytes a callee cleanup function will pop.  If we port
10998         // inalloca to more targets, we'll have to add custom inalloca handling
10999         // in the various CC lowering callbacks.
11000         Flags.setByVal();
11001       }
11002       if (Arg.hasAttribute(Attribute::Preallocated)) {
11003         Flags.setPreallocated();
11004         // Set the byval flag for CCAssignFn callbacks that don't know about
11005         // preallocated.  This way we can know how many bytes we should've
11006         // allocated and how many bytes a callee cleanup function will pop.  If
11007         // we port preallocated to more targets, we'll have to add custom
11008         // preallocated handling in the various CC lowering callbacks.
11009         Flags.setByVal();
11010       }
11011 
11012       // Certain targets (such as MIPS) may have a different ABI alignment
11013       // for a type depending on the context. Give the target a chance to
11014       // specify the alignment it wants.
11015       const Align OriginalAlignment(
11016           TLI->getABIAlignmentForCallingConv(ArgTy, DL));
11017       Flags.setOrigAlign(OriginalAlignment);
11018 
11019       Align MemAlign;
11020       Type *ArgMemTy = nullptr;
11021       if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
11022           Flags.isByRef()) {
11023         if (!ArgMemTy)
11024           ArgMemTy = Arg.getPointeeInMemoryValueType();
11025 
11026         uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
11027 
11028         // For in-memory arguments, size and alignment should be passed from FE.
11029         // BE will guess if this info is not there but there are cases it cannot
11030         // get right.
11031         if (auto ParamAlign = Arg.getParamStackAlign())
11032           MemAlign = *ParamAlign;
11033         else if ((ParamAlign = Arg.getParamAlign()))
11034           MemAlign = *ParamAlign;
11035         else
11036           MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
11037         if (Flags.isByRef())
11038           Flags.setByRefSize(MemSize);
11039         else
11040           Flags.setByValSize(MemSize);
11041       } else if (auto ParamAlign = Arg.getParamStackAlign()) {
11042         MemAlign = *ParamAlign;
11043       } else {
11044         MemAlign = OriginalAlignment;
11045       }
11046       Flags.setMemAlign(MemAlign);
11047 
11048       if (Arg.hasAttribute(Attribute::Nest))
11049         Flags.setNest();
11050       if (NeedsRegBlock)
11051         Flags.setInConsecutiveRegs();
11052       if (ArgCopyElisionCandidates.count(&Arg))
11053         Flags.setCopyElisionCandidate();
11054       if (Arg.hasAttribute(Attribute::Returned))
11055         Flags.setReturned();
11056 
11057       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
11058           *CurDAG->getContext(), F.getCallingConv(), VT);
11059       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
11060           *CurDAG->getContext(), F.getCallingConv(), VT);
11061       for (unsigned i = 0; i != NumRegs; ++i) {
11062         // For scalable vectors, use the minimum size; individual targets
11063         // are responsible for handling scalable vector arguments and
11064         // return values.
11065         ISD::InputArg MyFlags(
11066             Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11067             PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
11068         if (NumRegs > 1 && i == 0)
11069           MyFlags.Flags.setSplit();
11070         // If it isn't the first piece, the alignment must be 1.
11071         else if (i > 0) {
11072           MyFlags.Flags.setOrigAlign(Align(1));
11073           if (i == NumRegs - 1)
11074             MyFlags.Flags.setSplitEnd();
11075         }
11076         Ins.push_back(MyFlags);
11077       }
11078       if (NeedsRegBlock && Value == NumValues - 1)
11079         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11080       PartBase += VT.getStoreSize().getKnownMinValue();
11081     }
11082   }
11083 
11084   // Call the target to set up the argument values.
11085   SmallVector<SDValue, 8> InVals;
11086   SDValue NewRoot = TLI->LowerFormalArguments(
11087       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
11088 
11089   // Verify that the target's LowerFormalArguments behaved as expected.
11090   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
11091          "LowerFormalArguments didn't return a valid chain!");
11092   assert(InVals.size() == Ins.size() &&
11093          "LowerFormalArguments didn't emit the correct number of values!");
11094   LLVM_DEBUG({
11095     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
11096       assert(InVals[i].getNode() &&
11097              "LowerFormalArguments emitted a null value!");
11098       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11099              "LowerFormalArguments emitted a value with the wrong type!");
11100     }
11101   });
11102 
11103   // Update the DAG with the new chain value resulting from argument lowering.
11104   DAG.setRoot(NewRoot);
11105 
11106   // Set up the argument values.
11107   unsigned i = 0;
11108   if (!FuncInfo->CanLowerReturn) {
11109     // Create a virtual register for the sret pointer, and put in a copy
11110     // from the sret argument into it.
11111     SmallVector<EVT, 1> ValueVTs;
11112     ComputeValueVTs(*TLI, DAG.getDataLayout(),
11113                     PointerType::get(F.getContext(),
11114                                      DAG.getDataLayout().getAllocaAddrSpace()),
11115                     ValueVTs);
11116     MVT VT = ValueVTs[0].getSimpleVT();
11117     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
11118     std::optional<ISD::NodeType> AssertOp;
11119     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
11120                                         nullptr, F.getCallingConv(), AssertOp);
11121 
11122     MachineFunction &MF = SDB->DAG.getMachineFunction();
11123     MachineRegisterInfo &RegInfo = MF.getRegInfo();
11124     Register SRetReg =
11125         RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
11126     FuncInfo->DemoteRegister = SRetReg;
11127     NewRoot =
11128         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
11129     DAG.setRoot(NewRoot);
11130 
11131     // i indexes lowered arguments.  Bump it past the hidden sret argument.
11132     ++i;
11133   }
11134 
11135   SmallVector<SDValue, 4> Chains;
11136   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11137   for (const Argument &Arg : F.args()) {
11138     SmallVector<SDValue, 4> ArgValues;
11139     SmallVector<EVT, 4> ValueVTs;
11140     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11141     unsigned NumValues = ValueVTs.size();
11142     if (NumValues == 0)
11143       continue;
11144 
11145     bool ArgHasUses = !Arg.use_empty();
11146 
11147     // Elide the copying store if the target loaded this argument from a
11148     // suitable fixed stack object.
11149     if (Ins[i].Flags.isCopyElisionCandidate()) {
11150       unsigned NumParts = 0;
11151       for (EVT VT : ValueVTs)
11152         NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
11153                                                        F.getCallingConv(), VT);
11154 
11155       tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
11156                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
11157                              ArrayRef(&InVals[i], NumParts), ArgHasUses);
11158     }
11159 
11160     // If this argument is unused, remember its value anyway; it may still be
11161     // needed to generate debugging information.
11162     bool isSwiftErrorArg =
11163         TLI->supportSwiftError() &&
11164         Arg.hasAttribute(Attribute::SwiftError);
11165     if (!ArgHasUses && !isSwiftErrorArg) {
11166       SDB->setUnusedArgValue(&Arg, InVals[i]);
11167 
11168       // Also remember any frame index for use in FastISel.
11169       if (FrameIndexSDNode *FI =
11170           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11171         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11172     }
11173 
11174     for (unsigned Val = 0; Val != NumValues; ++Val) {
11175       EVT VT = ValueVTs[Val];
11176       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
11177                                                       F.getCallingConv(), VT);
11178       unsigned NumParts = TLI->getNumRegistersForCallingConv(
11179           *CurDAG->getContext(), F.getCallingConv(), VT);
11180 
11181       // Even an apparent 'unused' swifterror argument needs to be returned. So
11182       // we do generate a copy for it that can be used on return from the
11183       // function.
11184       if (ArgHasUses || isSwiftErrorArg) {
11185         std::optional<ISD::NodeType> AssertOp;
11186         if (Arg.hasAttribute(Attribute::SExt))
11187           AssertOp = ISD::AssertSext;
11188         else if (Arg.hasAttribute(Attribute::ZExt))
11189           AssertOp = ISD::AssertZext;
11190 
11191         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
11192                                              PartVT, VT, nullptr,
11193                                              F.getCallingConv(), AssertOp));
11194       }
11195 
11196       i += NumParts;
11197     }
11198 
11199     // We don't need to do anything else for unused arguments.
11200     if (ArgValues.empty())
11201       continue;
11202 
11203     // Note down frame index.
11204     if (FrameIndexSDNode *FI =
11205         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11206       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11207 
11208     SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
11209                                      SDB->getCurSDLoc());
11210 
11211     SDB->setValue(&Arg, Res);
11212     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
11213       // We want to associate the argument with the frame index, among
11214       // involved operands, that correspond to the lowest address. The
11215       // getCopyFromParts function, called earlier, is swapping the order of
11216       // the operands to BUILD_PAIR depending on endianness. The result of
11217       // that swapping is that the least significant bits of the argument will
11218       // be in the first operand of the BUILD_PAIR node, and the most
11219       // significant bits will be in the second operand.
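            // E.g. an i64 split into two i32 parts on a big-endian target
            // keeps its most significant word (operand 1 of the BUILD_PAIR)
            // at the lower address.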
11220       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
11221       if (LoadSDNode *LNode =
11222           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
11223         if (FrameIndexSDNode *FI =
11224             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11225           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11226     }
11227 
11228     // Analyses past this point are naive and don't expect an assertion.
11229     if (Res.getOpcode() == ISD::AssertZext)
11230       Res = Res.getOperand(0);
11231 
11232     // Update the SwiftErrorVRegDefMap.
11233     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
11234       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11235       if (Register::isVirtualRegister(Reg))
11236         SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
11237                                    Reg);
11238     }
11239 
11240     // If this argument is live outside of the entry block, insert a copy from
11241     // wherever we got it to the vreg that other BB's will reference it as.
11242     if (Res.getOpcode() == ISD::CopyFromReg) {
11243       // If we can, though, try to skip creating an unnecessary vreg.
11244       // FIXME: This isn't very clean... it would be nice to make this more
11245       // general.
11246       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11247       if (Register::isVirtualRegister(Reg)) {
11248         FuncInfo->ValueMap[&Arg] = Reg;
11249         continue;
11250       }
11251     }
11252     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
11253       FuncInfo->InitializeRegForValue(&Arg);
11254       SDB->CopyToExportRegsIfNeeded(&Arg);
11255     }
11256   }
11257 
11258   if (!Chains.empty()) {
11259     Chains.push_back(NewRoot);
11260     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
11261   }
11262 
11263   DAG.setRoot(NewRoot);
11264 
11265   assert(i == InVals.size() && "Argument register count mismatch!");
11266 
11267   // If any argument copy elisions occurred and we have debug info, update the
11268   // stale frame indices used in the dbg.declare variable info table.
11269   if (!ArgCopyElisionFrameIndexMap.empty()) {
11270     for (MachineFunction::VariableDbgInfo &VI :
11271          MF->getInStackSlotVariableDbgInfo()) {
11272       auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
11273       if (I != ArgCopyElisionFrameIndexMap.end())
11274         VI.updateStackSlot(I->second);
11275     }
11276   }
11277 
11278   // Finally, if the target has anything special to do, allow it to do so.
11279   emitFunctionEntryCode();
11280 }
11281 
11282 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
11283 /// ensure constants are generated when needed.  Remember the virtual registers
11284 /// that need to be added to the Machine PHI nodes as input.  We cannot just
11285 /// directly add them, because expansion might result in multiple MBB's for one
11286 /// BB.  As such, the start of the BB might correspond to a different MBB than
11287 /// the end.
11288 void
11289 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
11290   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11291 
11292   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
11293 
11294   // Check PHI nodes in successors that expect a value to be available from this
11295   // block.
11296   for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
11297     if (!isa<PHINode>(SuccBB->begin())) continue;
11298     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
11299 
11300     // If this terminator has multiple identical successors (common for
11301     // switches), only handle each succ once.
11302     if (!SuccsHandled.insert(SuccMBB).second)
11303       continue;
11304 
11305     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
11306 
11307     // At this point we know that there is a 1-1 correspondence between LLVM PHI
11308     // nodes and Machine PHI nodes, but the incoming operands have not been
11309     // emitted yet.
11310     for (const PHINode &PN : SuccBB->phis()) {
11311       // Ignore dead PHIs.
11312       if (PN.use_empty())
11313         continue;
11314 
11315       // Skip empty types
11316       if (PN.getType()->isEmptyTy())
11317         continue;
11318 
11319       unsigned Reg;
11320       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11321 
11322       if (const auto *C = dyn_cast<Constant>(PHIOp)) {
11323         unsigned &RegOut = ConstantsOut[C];
11324         if (RegOut == 0) {
11325           RegOut = FuncInfo.CreateRegs(C);
11326           // We need to zero/sign extend ConstantInt phi operands to match
11327           // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
11328           ISD::NodeType ExtendType = ISD::ANY_EXTEND;
11329           if (auto *CI = dyn_cast<ConstantInt>(C))
11330             ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
11331                                                     : ISD::ZERO_EXTEND;
11332           CopyValueToVirtualRegister(C, RegOut, ExtendType);
11333         }
11334         Reg = RegOut;
11335       } else {
11336         DenseMap<const Value *, Register>::iterator I =
11337           FuncInfo.ValueMap.find(PHIOp);
11338         if (I != FuncInfo.ValueMap.end())
11339           Reg = I->second;
11340         else {
11341           assert(isa<AllocaInst>(PHIOp) &&
11342                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
11343                  "Didn't codegen value into a register!??");
11344           Reg = FuncInfo.CreateRegs(PHIOp);
11345           CopyValueToVirtualRegister(PHIOp, Reg);
11346         }
11347       }
11348 
11349       // Remember that this register needs to be added to the machine PHI node as
11350       // the input for this MBB.
11351       SmallVector<EVT, 4> ValueVTs;
11352       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
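            // A PHI may expand to several value types, each lowered to one or
            // more registers; e.g. an i128 PHI on a 64-bit target becomes two
            // i64 registers, and each register feeds its own machine PHI node.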
11353       for (EVT VT : ValueVTs) {
11354         const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
11355         for (unsigned i = 0; i != NumRegisters; ++i)
11356           FuncInfo.PHINodesToUpdate.push_back(
11357               std::make_pair(&*MBBI++, Reg + i));
11358         Reg += NumRegisters;
11359       }
11360     }
11361   }
11362 
11363   ConstantsOut.clear();
11364 }
11365 
11366 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
11367   MachineFunction::iterator I(MBB);
11368   if (++I == FuncInfo.MF->end())
11369     return nullptr;
11370   return &*I;
11371 }
11372 
11373 /// During lowering, new call nodes can be created (such as memset).
11374 /// Those will become new roots of the current DAG, but complications arise
11375 /// when they are tail calls. In such cases, the call lowering will update
11376 /// the root, but the builder still needs to know that a tail call has been
11377 /// lowered in order to avoid generating an additional return.
11378 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
11379   // If the node is null, we do have a tail call.
11380   if (MaybeTC.getNode() != nullptr)
11381     DAG.setRoot(MaybeTC);
11382   else
11383     HasTailCall = true;
11384 }
11385 
11386 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
11387                                         MachineBasicBlock *SwitchMBB,
11388                                         MachineBasicBlock *DefaultMBB) {
11389   MachineFunction *CurMF = FuncInfo.MF;
11390   MachineBasicBlock *NextMBB = nullptr;
11391   MachineFunction::iterator BBI(W.MBB);
11392   if (++BBI != FuncInfo.MF->end())
11393     NextMBB = &*BBI;
11394 
11395   unsigned Size = W.LastCluster - W.FirstCluster + 1;
11396 
11397   BranchProbabilityInfo *BPI = FuncInfo.BPI;
11398 
11399   if (Size == 2 && W.MBB == SwitchMBB) {
11400     // If any two of the cases have the same destination, and if one value
11401     // is the same as the other, but has one bit unset that the other has set,
11402     // use bit manipulation to do two compares at once.  For example:
11403     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
11404     // TODO: This could be extended to merge any 2 cases in switches with 3
11405     // cases.
11406     // TODO: Handle cases where W.CaseBB != SwitchBB.
11407     CaseCluster &Small = *W.FirstCluster;
11408     CaseCluster &Big = *W.LastCluster;
11409 
11410     if (Small.Low == Small.High && Big.Low == Big.High &&
11411         Small.MBB == Big.MBB) {
11412       const APInt &SmallValue = Small.Low->getValue();
11413       const APInt &BigValue = Big.Low->getValue();
11414 
11415       // Check that there is only one bit different.
11416       APInt CommonBit = BigValue ^ SmallValue;
11417       if (CommonBit.isPowerOf2()) {
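              // Worked example of the transform above: SmallValue = 4 (0b100)
              // and BigValue = 6 (0b110) give CommonBit = 0b010, a power of
              // two, so (X | 0b010) == 0b110 holds exactly when X is 4 or 6.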
11418         SDValue CondLHS = getValue(Cond);
11419         EVT VT = CondLHS.getValueType();
11420         SDLoc DL = getCurSDLoc();
11421 
11422         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
11423                                  DAG.getConstant(CommonBit, DL, VT));
11424         SDValue Cond = DAG.getSetCC(
11425             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
11426             ISD::SETEQ);
11427 
11428         // Update successor info.
11429         // Both Small and Big will jump to Small.MBB, so we sum up the
11430         // probabilities.
11431         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
11432         if (BPI)
11433           addSuccessorWithProb(
11434               SwitchMBB, DefaultMBB,
11435               // The default destination is the first successor in IR.
11436               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
11437         else
11438           addSuccessorWithProb(SwitchMBB, DefaultMBB);
11439 
11440         // Insert the true branch.
11441         SDValue BrCond =
11442             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
11443                         DAG.getBasicBlock(Small.MBB));
11444         // Insert the false branch.
11445         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
11446                              DAG.getBasicBlock(DefaultMBB));
11447 
11448         DAG.setRoot(BrCond);
11449         return;
11450       }
11451     }
11452   }
11453 
11454   if (TM.getOptLevel() != CodeGenOptLevel::None) {
11455     // Here, we order cases by probability so the most likely case will be
11456     // checked first. However, two clusters can have the same probability in
11457     // which case their relative ordering is non-deterministic. So we use Low
11458     // as a tie-breaker as clusters are guaranteed to never overlap.
11459     llvm::sort(W.FirstCluster, W.LastCluster + 1,
11460                [](const CaseCluster &a, const CaseCluster &b) {
11461       return a.Prob != b.Prob ?
11462              a.Prob > b.Prob :
11463              a.Low->getValue().slt(b.Low->getValue());
11464     });
11465 
11466     // Rearrange the case blocks so that the last one falls through if possible
11467     // without changing the order of probabilities.
11468     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
11469       --I;
11470       if (I->Prob > W.LastCluster->Prob)
11471         break;
11472       if (I->Kind == CC_Range && I->MBB == NextMBB) {
11473         std::swap(*I, *W.LastCluster);
11474         break;
11475       }
11476     }
11477   }
11478 
11479   // Compute total probability.
11480   BranchProbability DefaultProb = W.DefaultProb;
11481   BranchProbability UnhandledProbs = DefaultProb;
11482   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
11483     UnhandledProbs += I->Prob;
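        // UnhandledProbs now covers the default plus every cluster in the work
        // item; each cluster's probability is subtracted as it is lowered, so
        // at any point it is the probability that no earlier check matched.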
11484 
11485   MachineBasicBlock *CurMBB = W.MBB;
11486   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
11487     bool FallthroughUnreachable = false;
11488     MachineBasicBlock *Fallthrough;
11489     if (I == W.LastCluster) {
11490       // For the last cluster, fall through to the default destination.
11491       Fallthrough = DefaultMBB;
11492       FallthroughUnreachable = isa<UnreachableInst>(
11493           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
11494     } else {
11495       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
11496       CurMF->insert(BBI, Fallthrough);
11497       // Put Cond in a virtual register to make it available from the new blocks.
11498       ExportFromCurrentBlock(Cond);
11499     }
11500     UnhandledProbs -= I->Prob;
11501 
11502     switch (I->Kind) {
11503       case CC_JumpTable: {
11504         // FIXME: Optimize away range check based on pivot comparisons.
11505         JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
11506         SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
11507 
11508         // The jump block hasn't been inserted yet; insert it here.
11509         MachineBasicBlock *JumpMBB = JT->MBB;
11510         CurMF->insert(BBI, JumpMBB);
11511 
11512         auto JumpProb = I->Prob;
11513         auto FallthroughProb = UnhandledProbs;
11514 
11515         // If the default statement is a target of the jump table, we evenly
11516         // distribute the default probability to successors of CurMBB. Also
11517         // update the probability on the edge from JumpMBB to Fallthrough.
11518         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
11519                                               SE = JumpMBB->succ_end();
11520              SI != SE; ++SI) {
11521           if (*SI == DefaultMBB) {
11522             JumpProb += DefaultProb / 2;
11523             FallthroughProb -= DefaultProb / 2;
11524             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
11525             JumpMBB->normalizeSuccProbs();
11526             break;
11527           }
11528         }
11529 
11530         // If the default clause is unreachable, propagate that knowledge into
11531         // JTH->FallthroughUnreachable which will use it to suppress the range
11532         // check.
11533         //
11534         // However, don't do this if we're doing branch target enforcement,
11535         // because a table branch _without_ a range check can be a tempting JOP
11536         // gadget - out-of-bounds inputs that are impossible in correct
11537         // execution become possible again if an attacker can influence the
11538         // control flow. So if an attacker doesn't already have a BTI bypass
11539         // available, we don't want them to be able to get one out of this
11540         // table branch.
11541         if (FallthroughUnreachable) {
11542           Function &CurFunc = CurMF->getFunction();
11543           bool HasBranchTargetEnforcement = false;
11544           if (CurFunc.hasFnAttribute("branch-target-enforcement")) {
11545             HasBranchTargetEnforcement =
11546                 CurFunc.getFnAttribute("branch-target-enforcement")
11547                     .getValueAsBool();
11548           } else {
11549             HasBranchTargetEnforcement =
11550                 CurMF->getMMI().getModule()->getModuleFlag(
11551                     "branch-target-enforcement");
11552           }
11553           if (!HasBranchTargetEnforcement)
11554             JTH->FallthroughUnreachable = true;
11555         }
11556 
11557         if (!JTH->FallthroughUnreachable)
11558           addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
11559         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
11560         CurMBB->normalizeSuccProbs();
11561 
11562         // The jump table header will be inserted in our current block; it will
11563         // do the range check and fall through to our fallthrough block.
11564         JTH->HeaderBB = CurMBB;
11565         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
11566 
11567         // If we're in the right place, emit the jump table header right now.
11568         if (CurMBB == SwitchMBB) {
11569           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
11570           JTH->Emitted = true;
11571         }
11572         break;
11573       }
11574       case CC_BitTests: {
11575         // FIXME: Optimize away range check based on pivot comparisons.
11576         BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
11577 
11578         // The bit test blocks haven't been inserted yet; insert them here.
11579         for (BitTestCase &BTC : BTB->Cases)
11580           CurMF->insert(BBI, BTC.ThisBB);
11581 
11582         // Fill in fields of the BitTestBlock.
11583         BTB->Parent = CurMBB;
11584         BTB->Default = Fallthrough;
11585 
11586         BTB->DefaultProb = UnhandledProbs;
11587         // If the cases in the bit test don't form a contiguous range, we evenly
11588         // distribute the probability on the edge to Fallthrough between the two
11589         // successors of CurMBB.
11590         if (!BTB->ContiguousRange) {
11591           BTB->Prob += DefaultProb / 2;
11592           BTB->DefaultProb -= DefaultProb / 2;
11593         }
11594 
11595         if (FallthroughUnreachable)
11596           BTB->FallthroughUnreachable = true;
11597 
11598         // If we're in the right place, emit the bit test header right now.
11599         if (CurMBB == SwitchMBB) {
11600           visitBitTestHeader(*BTB, SwitchMBB);
11601           BTB->Emitted = true;
11602         }
11603         break;
11604       }
11605       case CC_Range: {
11606         const Value *RHS, *LHS, *MHS;
11607         ISD::CondCode CC;
11608         if (I->Low == I->High) {
11609           // Check Cond == I->Low.
11610           CC = ISD::SETEQ;
11611           LHS = Cond;
11612           RHS = I->Low;
11613           MHS = nullptr;
11614         } else {
11615           // Check I->Low <= Cond <= I->High.
11616           CC = ISD::SETLE;
11617           LHS = I->Low;
11618           MHS = Cond;
11619           RHS = I->High;
11620         }
11621 
11622         // If Fallthrough is unreachable, fold away the comparison.
11623         if (FallthroughUnreachable)
11624           CC = ISD::SETTRUE;
11625 
11626         // The false probability is the sum of all unhandled cases.
11627         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
11628                      getCurSDLoc(), I->Prob, UnhandledProbs);
11629 
11630         if (CurMBB == SwitchMBB)
11631           visitSwitchCase(CB, SwitchMBB);
11632         else
11633           SL->SwitchCases.push_back(CB);
11634 
11635         break;
11636       }
11637     }
11638     CurMBB = Fallthrough;
11639   }
11640 }
11641 
11642 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
11643                                         const SwitchWorkListItem &W,
11644                                         Value *Cond,
11645                                         MachineBasicBlock *SwitchMBB) {
11646   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
11647          "Clusters not sorted?");
11648   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
11649 
11650   auto [LastLeft, FirstRight, LeftProb, RightProb] =
11651       SL->computeSplitWorkItemInfo(W);
11652 
11653   // Use the first element on the right as pivot since we will make less-than
11654   // comparisons against it.
11655   CaseClusterIt PivotCluster = FirstRight;
11656   assert(PivotCluster > W.FirstCluster);
11657   assert(PivotCluster <= W.LastCluster);
11658 
11659   CaseClusterIt FirstLeft = W.FirstCluster;
11660   CaseClusterIt LastRight = W.LastCluster;
11661 
11662   const ConstantInt *Pivot = PivotCluster->Low;
11663 
11664   // New blocks will be inserted immediately after the current one.
11665   MachineFunction::iterator BBI(W.MBB);
11666   ++BBI;
11667 
11668   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
11669   // we can branch to its destination directly if it's squeezed exactly in
11670   // between the known lower bound and Pivot - 1.
11671   MachineBasicBlock *LeftMBB;
11672   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
11673       FirstLeft->Low == W.GE &&
11674       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
11675     LeftMBB = FirstLeft->MBB;
11676   } else {
11677     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11678     FuncInfo.MF->insert(BBI, LeftMBB);
11679     WorkList.push_back(
11680         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
11681     // Put Cond in a virtual register to make it available from the new blocks.
11682     ExportFromCurrentBlock(Cond);
11683   }
11684 
11685   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
11686   // single cluster, RHS.Low == Pivot, and we can branch to its destination
11687   // directly if RHS.High equals the current upper bound.
11688   MachineBasicBlock *RightMBB;
11689   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
11690       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
11691     RightMBB = FirstRight->MBB;
11692   } else {
11693     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11694     FuncInfo.MF->insert(BBI, RightMBB);
11695     WorkList.push_back(
11696         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
11697     // Put Cond in a virtual register to make it available from the new blocks.
11698     ExportFromCurrentBlock(Cond);
11699   }
11700 
11701   // Create the CaseBlock record that will be used to lower the branch.
11702   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
11703                getCurSDLoc(), LeftProb, RightProb);
11704 
11705   if (W.MBB == SwitchMBB)
11706     visitSwitchCase(CB, SwitchMBB);
11707   else
11708     SL->SwitchCases.push_back(CB);
11709 }
11710 
11711 // Scale CaseProb after peeling a case with the probability of PeeledCaseProb
11712 // from the switch statement.
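      // E.g. peeling a case of probability 3/4 leaves 1/4 of the probability
      // mass, so a remaining case of probability 1/8 is rescaled to
      // (1/8) / (1/4) = 1/2. The std::max below guards against the scaled
      // denominator rounding below the numerator.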
11713 static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
11714                                             BranchProbability PeeledCaseProb) {
11715   if (PeeledCaseProb == BranchProbability::getOne())
11716     return BranchProbability::getZero();
11717   BranchProbability SwitchProb = PeeledCaseProb.getCompl();
11718 
11719   uint32_t Numerator = CaseProb.getNumerator();
11720   uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
11721   return BranchProbability(Numerator, std::max(Numerator, Denominator));
11722 }
11723 
11724 // Try to peel the top probability case if it exceeds the threshold.
11725 // Return the current MachineBasicBlock for the switch statement if peeling
11726 // does not occur.
11727 // If peeling is performed, return the newly created MachineBasicBlock
11728 // for the peeled switch statement and update Clusters to remove the peeled
11729 // case. PeeledCaseProb is set to the BranchProbability of the peeled case.
11730 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
11731     const SwitchInst &SI, CaseClusterVector &Clusters,
11732     BranchProbability &PeeledCaseProb) {
11733   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11734   // Don't peel if there is only one cluster or when optimizing for size.
11735   if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
11736       TM.getOptLevel() == CodeGenOptLevel::None ||
11737       SwitchMBB->getParent()->getFunction().hasMinSize())
11738     return SwitchMBB;
11739 
11740   BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
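        // Seed the search at the threshold so only clusters whose probability
        // is at least SwitchPeelThreshold percent can be chosen for peeling.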
11741   unsigned PeeledCaseIndex = 0;
11742   bool SwitchPeeled = false;
11743   for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
11744     CaseCluster &CC = Clusters[Index];
11745     if (CC.Prob < TopCaseProb)
11746       continue;
11747     TopCaseProb = CC.Prob;
11748     PeeledCaseIndex = Index;
11749     SwitchPeeled = true;
11750   }
11751   if (!SwitchPeeled)
11752     return SwitchMBB;
11753 
11754   LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
11755                     << TopCaseProb << "\n");
11756 
11757   // Record the MBB for the peeled switch statement.
11758   MachineFunction::iterator BBI(SwitchMBB);
11759   ++BBI;
11760   MachineBasicBlock *PeeledSwitchMBB =
11761       FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
11762   FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
11763 
11764   ExportFromCurrentBlock(SI.getCondition());
11765   auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
11766   SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
11767                           nullptr,   nullptr,      TopCaseProb.getCompl()};
11768   lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
11769 
11770   Clusters.erase(PeeledCaseIt);
11771   for (CaseCluster &CC : Clusters) {
11772     LLVM_DEBUG(
11773         dbgs() << "Scale the probablity for one cluster, before scaling: "
11774                << CC.Prob << "\n");
11775     CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
11776     LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
11777   }
11778   PeeledCaseProb = TopCaseProb;
11779   return PeeledSwitchMBB;
11780 }
11781 
11782 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
11783   // Extract cases from the switch.
11784   BranchProbabilityInfo *BPI = FuncInfo.BPI;
11785   CaseClusterVector Clusters;
11786   Clusters.reserve(SI.getNumCases());
11787   for (auto I : SI.cases()) {
11788     MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
11789     const ConstantInt *CaseVal = I.getCaseValue();
11790     BranchProbability Prob =
11791         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
11792             : BranchProbability(1, SI.getNumCases() + 1);
11793     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
11794   }
11795 
11796   MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
11797 
11798   // Cluster adjacent cases with the same destination. We do this at all
11799   // optimization levels because it's cheap to do and will make codegen faster
11800   // if there are many clusters.
11801   sortAndRangeify(Clusters);
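        // E.g. cases 1, 2 and 3 that all branch to the same block become a
        // single CC_Range cluster [1, 3] here.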
11802 
11803   // The branch probability of the peeled case.
11804   BranchProbability PeeledCaseProb = BranchProbability::getZero();
11805   MachineBasicBlock *PeeledSwitchMBB =
11806       peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
11807 
11808   // If there is only the default destination, jump there directly.
11809   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11810   if (Clusters.empty()) {
11811     assert(PeeledSwitchMBB == SwitchMBB);
11812     SwitchMBB->addSuccessor(DefaultMBB);
11813     if (DefaultMBB != NextBlock(SwitchMBB)) {
11814       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
11815                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
11816     }
11817     return;
11818   }
11819 
11820   SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
11821                      DAG.getBFI());
11822   SL->findBitTestClusters(Clusters, &SI);
11823 
11824   LLVM_DEBUG({
11825     dbgs() << "Case clusters: ";
11826     for (const CaseCluster &C : Clusters) {
11827       if (C.Kind == CC_JumpTable)
11828         dbgs() << "JT:";
11829       if (C.Kind == CC_BitTests)
11830         dbgs() << "BT:";
11831 
11832       C.Low->getValue().print(dbgs(), true);
11833       if (C.Low != C.High) {
11834         dbgs() << '-';
11835         C.High->getValue().print(dbgs(), true);
11836       }
11837       dbgs() << ' ';
11838     }
11839     dbgs() << '\n';
11840   });
11841 
11842   assert(!Clusters.empty());
11843   SwitchWorkList WorkList;
11844   CaseClusterIt First = Clusters.begin();
11845   CaseClusterIt Last = Clusters.end() - 1;
11846   auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
11847   // Scale the branch probability for DefaultMBB if peeling occurred and
11848   // DefaultMBB is not replaced.
11849   if (PeeledCaseProb != BranchProbability::getZero() &&
11850       DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
11851     DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
11852   WorkList.push_back(
11853       {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
11854 
11855   while (!WorkList.empty()) {
11856     SwitchWorkListItem W = WorkList.pop_back_val();
11857     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
11858 
11859     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
11860         !DefaultMBB->getParent()->getFunction().hasMinSize()) {
11861       // For optimized builds, lower large range as a balanced binary tree.
11862       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
11863       continue;
11864     }
11865 
11866     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
11867   }
11868 }
11869 
11870 void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
11871   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11872   auto DL = getCurSDLoc();
11873   EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11874   setValue(&I, DAG.getStepVector(DL, ResultVT));
11875 }
11876 
11877 void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
11878   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11879   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11880 
11881   SDLoc DL = getCurSDLoc();
11882   SDValue V = getValue(I.getOperand(0));
11883   assert(VT == V.getValueType() && "Malformed vector.reverse!");
11884 
11885   if (VT.isScalableVector()) {
11886     setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
11887     return;
11888   }
11889 
11890   // Use VECTOR_SHUFFLE for the fixed-length vector
11891   // to maintain existing behavior.
11892   SmallVector<int, 8> Mask;
11893   unsigned NumElts = VT.getVectorMinNumElements();
11894   for (unsigned i = 0; i != NumElts; ++i)
11895     Mask.push_back(NumElts - 1 - i);
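        // E.g. NumElts == 4 produces the shuffle mask <3, 2, 1, 0>.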
11896 
11897   setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
11898 }
11899 
11900 void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I) {
11901   auto DL = getCurSDLoc();
11902   SDValue InVec = getValue(I.getOperand(0));
11903   EVT OutVT =
11904       InVec.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
11905 
11906   unsigned OutNumElts = OutVT.getVectorMinNumElements();
11907 
11908   // The ISD node needs the input vector split into two equal parts.
11909   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11910                            DAG.getVectorIdxConstant(0, DL));
11911   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11912                            DAG.getVectorIdxConstant(OutNumElts, DL));
11913 
11914   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
11915   // legalisation and combines.
11916   if (OutVT.isFixedLengthVector()) {
11917     SDValue Even = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
11918                                         createStrideMask(0, 2, OutNumElts));
11919     SDValue Odd = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
11920                                        createStrideMask(1, 2, OutNumElts));
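          // E.g. OutNumElts == 4: the even mask is <0, 2, 4, 6> and the odd
          // mask is <1, 3, 5, 7>, indexing into the concatenation of Lo and Hi.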
11921     SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
11922     setValue(&I, Res);
11923     return;
11924   }
11925 
11926   SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
11927                             DAG.getVTList(OutVT, OutVT), Lo, Hi);
11928   setValue(&I, Res);
11929 }
11930 
11931 void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I) {
11932   auto DL = getCurSDLoc();
11933   EVT InVT = getValue(I.getOperand(0)).getValueType();
11934   SDValue InVec0 = getValue(I.getOperand(0));
11935   SDValue InVec1 = getValue(I.getOperand(1));
11936   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11937   EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11938 
11939   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
11940   // legalisation and combines.
11941   if (OutVT.isFixedLengthVector()) {
11942     unsigned NumElts = InVT.getVectorMinNumElements();
11943     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVec0, InVec1);
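          // E.g. NumElts == 4: createInterleaveMask(4, 2) is
          // <0, 4, 1, 5, 2, 6, 3, 7>, alternating elements of InVec0 and
          // InVec1 taken from the concatenated vector V.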
11944     setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
11945                                       createInterleaveMask(NumElts, 2)));
11946     return;
11947   }
11948 
11949   SDValue Res = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
11950                             DAG.getVTList(InVT, InVT), InVec0, InVec1);
11951   Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Res.getValue(0),
11952                     Res.getValue(1));
11953   setValue(&I, Res);
11954 }
11955 
11956 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
11957   SmallVector<EVT, 4> ValueVTs;
11958   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
11959                   ValueVTs);
11960   unsigned NumValues = ValueVTs.size();
11961   if (NumValues == 0) return;
11962 
11963   SmallVector<SDValue, 4> Values(NumValues);
11964   SDValue Op = getValue(I.getOperand(0));
11965 
11966   for (unsigned i = 0; i != NumValues; ++i)
11967     Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
11968                             SDValue(Op.getNode(), Op.getResNo() + i));
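        // Each member of an aggregate result is frozen separately; the
        // MERGE_VALUES node below stitches the frozen parts back together.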
11969 
11970   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
11971                            DAG.getVTList(ValueVTs), Values));
11972 }
11973 
11974 void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
11975   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11976   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11977 
11978   SDLoc DL = getCurSDLoc();
11979   SDValue V1 = getValue(I.getOperand(0));
11980   SDValue V2 = getValue(I.getOperand(1));
11981   int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
11982 
11983   // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
11984   if (VT.isScalableVector()) {
11985     MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
11986     setValue(&I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
11987                              DAG.getConstant(Imm, DL, IdxVT)));
11988     return;
11989   }
11990 
11991   unsigned NumElts = VT.getVectorNumElements();
11992 
11993   uint64_t Idx = (NumElts + Imm) % NumElts;
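        // E.g. NumElts == 8 and Imm == -2 (take the trailing two elements of
        // V1): Idx == 6 and the mask below is <6, 7, 8, 9, 10, 11, 12, 13>,
        // selecting V1[6..7] followed by V2[0..5].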
11994 
11995   // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
11996   SmallVector<int, 8> Mask;
11997   for (unsigned i = 0; i < NumElts; ++i)
11998     Mask.push_back(Idx + i);
11999   setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
12000 }
12001 
12002 // Consider the following MIR after SelectionDAG, which produces output in
12003 // physregs in the first case or virtregs in the second case.
12004 //
12005 // INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
12006 // %5:gr32 = COPY $ebx
12007 // %6:gr32 = COPY $edx
12008 // %1:gr32 = COPY %6:gr32
12009 // %0:gr32 = COPY %5:gr32
12010 //
12011 // INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
12012 // %1:gr32 = COPY %6:gr32
12013 // %0:gr32 = COPY %5:gr32
12014 //
12015 // Given %0, we'd like to return $ebx in the first case and %5 in the second.
12016 // Given %1, we'd like to return $edx in the first case and %6 in the second.
12017 //
12018 // If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
12019 // to a single virtreg (such as %0). The remaining outputs monotonically
12020 // increase in virtreg number from there. If a callbr has no outputs, then it
12021 // should not have a corresponding callbr landingpad; in fact, the callbr
12022 // landingpad would not even be able to refer to such a callbr.
12023 static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) {
12024   MachineInstr *MI = MRI.def_begin(Reg)->getParent();
12025   // There is definitely at least one copy.
12026   assert(MI->getOpcode() == TargetOpcode::COPY &&
12027          "start of copy chain MUST be COPY");
12028   Reg = MI->getOperand(1).getReg();
12029   MI = MRI.def_begin(Reg)->getParent();
12030   // There may be an optional second copy.
12031   if (MI->getOpcode() == TargetOpcode::COPY) {
12032     assert(Reg.isVirtual() && "expected COPY of virtual register");
12033     Reg = MI->getOperand(1).getReg();
12034     assert(Reg.isPhysical() && "expected COPY of physical register");
12035     MI = MRI.def_begin(Reg)->getParent();
12036   }
12037   // The copy chain must ultimately originate from an INLINEASM_BR.
12038   assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12039          "end of copy chain MUST be INLINEASM_BR");
12040   return Reg;
12041 }
12042 
12043 // We must do this walk rather than the simpler
12044 //   setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
12045 // otherwise we will end up with copies of virtregs only valid along direct
12046 // edges.
12047 void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
12048   SmallVector<EVT, 8> ResultVTs;
12049   SmallVector<SDValue, 8> ResultValues;
12050   const auto *CBR =
12051       cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());
12052 
12053   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12054   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
12055   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
12056 
12057   unsigned InitialDef = FuncInfo.ValueMap[CBR];
12058   SDValue Chain = DAG.getRoot();
12059 
12060   // Re-parse the asm constraints string.
12061   TargetLowering::AsmOperandInfoVector TargetConstraints =
12062       TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
12063   for (auto &T : TargetConstraints) {
12064     SDISelAsmOperandInfo OpInfo(T);
12065     if (OpInfo.Type != InlineAsm::isOutput)
12066       continue;
12067 
12068     // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the
12069     // individual constraint.
12070     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
12071 
12072     switch (OpInfo.ConstraintType) {
12073     case TargetLowering::C_Register:
12074     case TargetLowering::C_RegisterClass: {
12075       // Fill in OpInfo.AssignedRegs.Regs.
12076       getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);
12077 
12078       // getRegistersForValue may produce 1 to many registers based on whether
12079       // the OpInfo.ConstraintVT is legal on the target or not.
12080       for (size_t i = 0, e = OpInfo.AssignedRegs.Regs.size(); i != e; ++i) {
12081         Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
12082         if (Register::isPhysicalRegister(OriginalDef))
12083           FuncInfo.MBB->addLiveIn(OriginalDef);
12084         // Update the assigned registers to use the original defs.
12085         OpInfo.AssignedRegs.Regs[i] = OriginalDef;
12086       }
12087 
12088       SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12089           DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
12090       ResultValues.push_back(V);
12091       ResultVTs.push_back(OpInfo.ConstraintVT);
12092       break;
12093     }
12094     case TargetLowering::C_Other: {
12095       SDValue Flag;
12096       SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
12097                                                   OpInfo, DAG);
12098       ++InitialDef;
12099       ResultValues.push_back(V);
12100       ResultVTs.push_back(OpInfo.ConstraintVT);
12101       break;
12102     }
12103     default:
12104       break;
12105     }
12106   }
12107   SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12108                           DAG.getVTList(ResultVTs), ResultValues);
12109   setValue(&I, V);
12110 }
12111