xref: /llvm-project/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (revision 62d0c01c2c9adce67f2e1adb9feecd7ba1a97814)
1 //===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements routines for translating from LLVM IR into SelectionDAG IR.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "SelectionDAGBuilder.h"
14 #include "SDNodeDbgValue.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/BitVector.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/ADT/StringRef.h"
22 #include "llvm/ADT/Twine.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/BranchProbabilityInfo.h"
25 #include "llvm/Analysis/ConstantFolding.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/TargetLibraryInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/Analysis/VectorUtils.h"
31 #include "llvm/CodeGen/Analysis.h"
32 #include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
33 #include "llvm/CodeGen/CodeGenCommonISel.h"
34 #include "llvm/CodeGen/FunctionLoweringInfo.h"
35 #include "llvm/CodeGen/GCMetadata.h"
36 #include "llvm/CodeGen/ISDOpcodes.h"
37 #include "llvm/CodeGen/MachineBasicBlock.h"
38 #include "llvm/CodeGen/MachineFrameInfo.h"
39 #include "llvm/CodeGen/MachineFunction.h"
40 #include "llvm/CodeGen/MachineInstrBuilder.h"
41 #include "llvm/CodeGen/MachineInstrBundleIterator.h"
42 #include "llvm/CodeGen/MachineMemOperand.h"
43 #include "llvm/CodeGen/MachineModuleInfo.h"
44 #include "llvm/CodeGen/MachineOperand.h"
45 #include "llvm/CodeGen/MachineRegisterInfo.h"
46 #include "llvm/CodeGen/RuntimeLibcalls.h"
47 #include "llvm/CodeGen/SelectionDAG.h"
48 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
49 #include "llvm/CodeGen/StackMaps.h"
50 #include "llvm/CodeGen/SwiftErrorValueTracking.h"
51 #include "llvm/CodeGen/TargetFrameLowering.h"
52 #include "llvm/CodeGen/TargetInstrInfo.h"
53 #include "llvm/CodeGen/TargetOpcodes.h"
54 #include "llvm/CodeGen/TargetRegisterInfo.h"
55 #include "llvm/CodeGen/TargetSubtargetInfo.h"
56 #include "llvm/CodeGen/WinEHFuncInfo.h"
57 #include "llvm/IR/Argument.h"
58 #include "llvm/IR/Attributes.h"
59 #include "llvm/IR/BasicBlock.h"
60 #include "llvm/IR/CFG.h"
61 #include "llvm/IR/CallingConv.h"
62 #include "llvm/IR/Constant.h"
63 #include "llvm/IR/ConstantRange.h"
64 #include "llvm/IR/Constants.h"
65 #include "llvm/IR/DataLayout.h"
66 #include "llvm/IR/DebugInfo.h"
67 #include "llvm/IR/DebugInfoMetadata.h"
68 #include "llvm/IR/DerivedTypes.h"
69 #include "llvm/IR/DiagnosticInfo.h"
70 #include "llvm/IR/EHPersonalities.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GetElementPtrTypeIterator.h"
73 #include "llvm/IR/InlineAsm.h"
74 #include "llvm/IR/InstrTypes.h"
75 #include "llvm/IR/Instructions.h"
76 #include "llvm/IR/IntrinsicInst.h"
77 #include "llvm/IR/Intrinsics.h"
78 #include "llvm/IR/IntrinsicsAArch64.h"
79 #include "llvm/IR/IntrinsicsAMDGPU.h"
80 #include "llvm/IR/IntrinsicsWebAssembly.h"
81 #include "llvm/IR/LLVMContext.h"
82 #include "llvm/IR/Metadata.h"
83 #include "llvm/IR/Module.h"
84 #include "llvm/IR/Operator.h"
85 #include "llvm/IR/PatternMatch.h"
86 #include "llvm/IR/Statepoint.h"
87 #include "llvm/IR/Type.h"
88 #include "llvm/IR/User.h"
89 #include "llvm/IR/Value.h"
90 #include "llvm/MC/MCContext.h"
91 #include "llvm/Support/AtomicOrdering.h"
92 #include "llvm/Support/Casting.h"
93 #include "llvm/Support/CommandLine.h"
94 #include "llvm/Support/Compiler.h"
95 #include "llvm/Support/Debug.h"
96 #include "llvm/Support/MathExtras.h"
97 #include "llvm/Support/raw_ostream.h"
98 #include "llvm/Target/TargetIntrinsicInfo.h"
99 #include "llvm/Target/TargetMachine.h"
100 #include "llvm/Target/TargetOptions.h"
101 #include "llvm/TargetParser/Triple.h"
102 #include "llvm/Transforms/Utils/Local.h"
103 #include <cstddef>
104 #include <iterator>
105 #include <limits>
106 #include <optional>
107 #include <tuple>
108 
109 using namespace llvm;
110 using namespace PatternMatch;
111 using namespace SwitchCG;
112 
113 #define DEBUG_TYPE "isel"
114 
115 /// LimitFloatPrecision - Generate low-precision inline sequences for
116 /// some float libcalls (6, 8 or 12 bits).
117 static unsigned LimitFloatPrecision;
118 
119 static cl::opt<bool>
120     InsertAssertAlign("insert-assert-align", cl::init(true),
121                       cl::desc("Insert the experimental `assertalign` node."),
122                       cl::ReallyHidden);
123 
124 static cl::opt<unsigned, true>
125     LimitFPPrecision("limit-float-precision",
126                      cl::desc("Generate low-precision inline sequences "
127                               "for some float libcalls"),
128                      cl::location(LimitFloatPrecision), cl::Hidden,
129                      cl::init(0));
130 
131 static cl::opt<unsigned> SwitchPeelThreshold(
132     "switch-peel-threshold", cl::Hidden, cl::init(66),
133     cl::desc("Set the case probability threshold for peeling the case from a "
134              "switch statement. A value greater than 100 will void this "
135              "optimization"));
136 
137 // Limit the width of DAG chains. This is important in general to prevent
138 // DAG-based analysis from blowing up. For example, alias analysis and
139 // load clustering may not complete in reasonable time. It is difficult to
140 // recognize and avoid this situation within each individual analysis, and
141 // future analyses are likely to have the same behavior. Limiting DAG width is
142 // the safe approach and will be especially important with global DAGs.
143 //
144 // MaxParallelChains default is arbitrarily high to avoid affecting
145 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
146 // sequence over this should have been converted to llvm.memcpy by the
147 // frontend. It is easy to induce this behavior with .ll code such as:
148 // %buffer = alloca [4096 x i8]
149 // %data = load [4096 x i8]* %argPtr
150 // store [4096 x i8] %data, [4096 x i8]* %buffer
151 static const unsigned MaxParallelChains = 64;
152 
153 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
154                                       const SDValue *Parts, unsigned NumParts,
155                                       MVT PartVT, EVT ValueVT, const Value *V,
156                                       SDValue InChain,
157                                       std::optional<CallingConv::ID> CC);
158 
159 /// getCopyFromParts - Create a value that contains the specified legal parts
160 /// combined into the value they represent.  If the parts combine to a type
161 /// larger than ValueVT then AssertOp can be used to specify whether the extra
162 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
163 /// (ISD::AssertSext).
164 static SDValue
165 getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
166                  unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
167                  SDValue InChain,
168                  std::optional<CallingConv::ID> CC = std::nullopt,
169                  std::optional<ISD::NodeType> AssertOp = std::nullopt) {
170   // Let the target assemble the parts if it wants to
171   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
172   if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
173                                                    PartVT, ValueVT, CC))
174     return Val;
175 
176   if (ValueVT.isVector())
177     return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
178                                   InChain, CC);
179 
180   assert(NumParts > 0 && "No parts to assemble!");
181   SDValue Val = Parts[0];
182 
183   if (NumParts > 1) {
184     // Assemble the value from multiple parts.
185     if (ValueVT.isInteger()) {
186       unsigned PartBits = PartVT.getSizeInBits();
187       unsigned ValueBits = ValueVT.getSizeInBits();
188 
189       // Assemble the power of 2 part.
190       unsigned RoundParts = llvm::bit_floor(NumParts);
191       unsigned RoundBits = PartBits * RoundParts;
192       EVT RoundVT = RoundBits == ValueBits ?
193         ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
194       SDValue Lo, Hi;
195 
196       EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
197 
198       if (RoundParts > 2) {
199         Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, PartVT, HalfVT, V,
200                               InChain);
201         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, RoundParts / 2,
202                               PartVT, HalfVT, V, InChain);
203       } else {
204         Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
205         Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
206       }
207 
208       if (DAG.getDataLayout().isBigEndian())
209         std::swap(Lo, Hi);
210 
211       Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
212 
213       if (RoundParts < NumParts) {
214         // Assemble the trailing non-power-of-2 part.
215         unsigned OddParts = NumParts - RoundParts;
216         EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
217         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
218                               OddVT, V, InChain, CC);
219 
220         // Combine the round and odd parts.
221         Lo = Val;
222         if (DAG.getDataLayout().isBigEndian())
223           std::swap(Lo, Hi);
224         EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
225         Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
226         Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
227                          DAG.getConstant(Lo.getValueSizeInBits(), DL,
228                                          TLI.getShiftAmountTy(
229                                              TotalVT, DAG.getDataLayout())));
230         Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
231         Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
232       }
233     } else if (PartVT.isFloatingPoint()) {
234       // FP split into multiple FP parts (for ppcf128)
235       assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
236              "Unexpected split");
237       SDValue Lo, Hi;
238       Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
239       Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
240       if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
241         std::swap(Lo, Hi);
242       Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
243     } else {
244       // FP split into integer parts (soft fp)
245       assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
246              !PartVT.isVector() && "Unexpected split");
247       EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
248       Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V,
249                              InChain, CC);
250     }
251   }
252 
253   // There is now one part, held in Val.  Correct it to match ValueVT.
254   // PartEVT is the type of the register class that holds the value.
255   // ValueVT is the type of the inline asm operation.
256   EVT PartEVT = Val.getValueType();
257 
258   if (PartEVT == ValueVT)
259     return Val;
260 
261   if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
262       ValueVT.bitsLT(PartEVT)) {
263     // For an FP value in an integer part, we need to truncate to the right
264     // width first.
265     PartEVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
266     Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
267   }
268 
269   // Handle types that have the same size.
270   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
271     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
272 
273   // Handle types with different sizes.
274   if (PartEVT.isInteger() && ValueVT.isInteger()) {
275     if (ValueVT.bitsLT(PartEVT)) {
276       // For a truncate, see if we have any information to
277       // indicate whether the truncated bits will always be
278       // zero or sign-extension.
279       if (AssertOp)
280         Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
281                           DAG.getValueType(ValueVT));
282       return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
283     }
284     return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
285   }
286 
287   if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
288     // FP_ROUND's are always exact here.
289     if (ValueVT.bitsLT(Val.getValueType())) {
290 
291       SDValue NoChange =
292           DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
293 
294       if (DAG.getMachineFunction().getFunction().getAttributes().hasFnAttr(
295               llvm::Attribute::StrictFP)) {
296         return DAG.getNode(ISD::STRICT_FP_ROUND, DL,
297                            DAG.getVTList(ValueVT, MVT::Other), InChain, Val,
298                            NoChange);
299       }
300 
301       return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val, NoChange);
302     }
303 
304     return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
305   }
306 
307   // Handle MMX to a narrower integer type by bitcasting MMX to integer and
308   // then truncating.
309   if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
310       ValueVT.bitsLT(PartEVT)) {
311     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
312     return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
313   }
314 
315   report_fatal_error("Unknown mismatch in getCopyFromParts!");
316 }
317 
318 static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
319                                               const Twine &ErrMsg) {
320   const Instruction *I = dyn_cast_or_null<Instruction>(V);
321   if (!V)
322     return Ctx.emitError(ErrMsg);
323 
324   const char *AsmError = ", possible invalid constraint for vector type";
325   if (const CallInst *CI = dyn_cast<CallInst>(I))
326     if (CI->isInlineAsm())
327       return Ctx.emitError(I, ErrMsg + AsmError);
328 
329   return Ctx.emitError(I, ErrMsg);
330 }
331 
332 /// getCopyFromPartsVector - Create a value that contains the specified legal
333 /// parts combined into the value they represent.  If the parts combine to a
334 /// type larger than ValueVT then AssertOp can be used to specify whether the
335 /// extra bits are known to be zero (ISD::AssertZext) or sign extended from
336 /// ValueVT (ISD::AssertSext).
337 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
338                                       const SDValue *Parts, unsigned NumParts,
339                                       MVT PartVT, EVT ValueVT, const Value *V,
340                                       SDValue InChain,
341                                       std::optional<CallingConv::ID> CallConv) {
342   assert(ValueVT.isVector() && "Not a vector value");
343   assert(NumParts > 0 && "No parts to assemble!");
344   const bool IsABIRegCopy = CallConv.has_value();
345 
346   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
347   SDValue Val = Parts[0];
348 
349   // Handle a multi-element vector.
350   if (NumParts > 1) {
351     EVT IntermediateVT;
352     MVT RegisterVT;
353     unsigned NumIntermediates;
354     unsigned NumRegs;
355 
356     if (IsABIRegCopy) {
357       NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
358           *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
359           NumIntermediates, RegisterVT);
360     } else {
361       NumRegs =
362           TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
363                                      NumIntermediates, RegisterVT);
364     }
365 
366     assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
367     NumParts = NumRegs; // Silence a compiler warning.
368     assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
369     assert(RegisterVT.getSizeInBits() ==
370            Parts[0].getSimpleValueType().getSizeInBits() &&
371            "Part type sizes don't match!");
372 
373     // Assemble the parts into intermediate operands.
374     SmallVector<SDValue, 8> Ops(NumIntermediates);
375     if (NumIntermediates == NumParts) {
376       // If the register was not expanded, truncate or copy the value,
377       // as appropriate.
378       for (unsigned i = 0; i != NumParts; ++i)
379         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1, PartVT, IntermediateVT,
380                                   V, InChain, CallConv);
381     } else if (NumParts > 0) {
382       // If the intermediate type was expanded, build the intermediate
383       // operands from the parts.
384       assert(NumParts % NumIntermediates == 0 &&
385              "Must expand into a divisible number of parts!");
386       unsigned Factor = NumParts / NumIntermediates;
387       for (unsigned i = 0; i != NumIntermediates; ++i)
388         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor, PartVT,
389                                   IntermediateVT, V, InChain, CallConv);
390     }
391 
392     // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
393     // intermediate operands.
394     EVT BuiltVectorTy =
395         IntermediateVT.isVector()
396             ? EVT::getVectorVT(
397                   *DAG.getContext(), IntermediateVT.getScalarType(),
398                   IntermediateVT.getVectorElementCount() * NumParts)
399             : EVT::getVectorVT(*DAG.getContext(),
400                                IntermediateVT.getScalarType(),
401                                NumIntermediates);
402     Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
403                                                 : ISD::BUILD_VECTOR,
404                       DL, BuiltVectorTy, Ops);
405   }
406 
407   // There is now one part, held in Val.  Correct it to match ValueVT.
408   EVT PartEVT = Val.getValueType();
409 
410   if (PartEVT == ValueVT)
411     return Val;
412 
413   if (PartEVT.isVector()) {
414     // Vector/Vector bitcast.
415     if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
416       return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
417 
418     // If the parts vector has more elements than the value vector, then we
419     // have a vector widening case (e.g. <2 x float> -> <4 x float>).
420     // Extract the elements we want.
421     if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
422       assert((PartEVT.getVectorElementCount().getKnownMinValue() >
423               ValueVT.getVectorElementCount().getKnownMinValue()) &&
424              (PartEVT.getVectorElementCount().isScalable() ==
425               ValueVT.getVectorElementCount().isScalable()) &&
426              "Cannot narrow, it would be a lossy transformation");
427       PartEVT =
428           EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
429                            ValueVT.getVectorElementCount());
430       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
431                         DAG.getVectorIdxConstant(0, DL));
432       if (PartEVT == ValueVT)
433         return Val;
434       if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
435         return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
436 
437       // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
438       if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
439         return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
440     }
441 
442     // Promoted vector extract
443     return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
444   }
445 
446   // Trivial bitcast if the types are the same size and the destination
447   // vector type is legal.
448   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
449       TLI.isTypeLegal(ValueVT))
450     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
451 
452   if (ValueVT.getVectorNumElements() != 1) {
453      // Certain ABIs require that vectors are passed as integers. For vectors
454      // are the same size, this is an obvious bitcast.
455      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
456        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
457      } else if (ValueVT.bitsLT(PartEVT)) {
458        const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
459        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
460        // Drop the extra bits.
461        Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
462        return DAG.getBitcast(ValueVT, Val);
463      }
464 
465      diagnosePossiblyInvalidConstraint(
466          *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
467      return DAG.getUNDEF(ValueVT);
468   }
469 
470   // Handle cases such as i8 -> <1 x i1>
471   EVT ValueSVT = ValueVT.getVectorElementType();
472   if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
473     unsigned ValueSize = ValueSVT.getSizeInBits();
474     if (ValueSize == PartEVT.getSizeInBits()) {
475       Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
476     } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
477       // It's possible a scalar floating point type gets softened to integer and
478       // then promoted to a larger integer. If PartEVT is the larger integer
479       // we need to truncate it and then bitcast to the FP type.
480       assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
481       EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
482       Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
483       Val = DAG.getBitcast(ValueSVT, Val);
484     } else {
485       Val = ValueVT.isFloatingPoint()
486                 ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
487                 : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
488     }
489   }
490 
491   return DAG.getBuildVector(ValueVT, DL, Val);
492 }
493 
494 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
495                                  SDValue Val, SDValue *Parts, unsigned NumParts,
496                                  MVT PartVT, const Value *V,
497                                  std::optional<CallingConv::ID> CallConv);
498 
499 /// getCopyToParts - Create a series of nodes that contain the specified value
500 /// split into legal parts.  If the parts contain more bits than Val, then, for
501 /// integers, ExtendKind can be used to specify how to generate the extra bits.
502 static void
503 getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
504                unsigned NumParts, MVT PartVT, const Value *V,
505                std::optional<CallingConv::ID> CallConv = std::nullopt,
506                ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
507   // Let the target split the parts if it wants to
508   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
509   if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
510                                       CallConv))
511     return;
512   EVT ValueVT = Val.getValueType();
513 
514   // Handle the vector case separately.
515   if (ValueVT.isVector())
516     return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
517                                 CallConv);
518 
519   unsigned OrigNumParts = NumParts;
520   assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
521          "Copying to an illegal type!");
522 
523   if (NumParts == 0)
524     return;
525 
526   assert(!ValueVT.isVector() && "Vector case handled elsewhere");
527   EVT PartEVT = PartVT;
528   if (PartEVT == ValueVT) {
529     assert(NumParts == 1 && "No-op copy with multiple parts!");
530     Parts[0] = Val;
531     return;
532   }
533 
534   unsigned PartBits = PartVT.getSizeInBits();
535   if (NumParts * PartBits > ValueVT.getSizeInBits()) {
536     // If the parts cover more bits than the value has, promote the value.
537     if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
538       assert(NumParts == 1 && "Do not know what to promote to!");
539       Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
540     } else {
541       if (ValueVT.isFloatingPoint()) {
542         // FP values need to be bitcast, then extended if they are being put
543         // into a larger container.
544         ValueVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
545         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
546       }
547       assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
548              ValueVT.isInteger() &&
549              "Unknown mismatch!");
550       ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
551       Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
552       if (PartVT == MVT::x86mmx)
553         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
554     }
555   } else if (PartBits == ValueVT.getSizeInBits()) {
556     // Different types of the same size.
557     assert(NumParts == 1 && PartEVT != ValueVT);
558     Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
559   } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
560     // If the parts cover less bits than value has, truncate the value.
561     assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
562            ValueVT.isInteger() &&
563            "Unknown mismatch!");
564     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
565     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
566     if (PartVT == MVT::x86mmx)
567       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
568   }
569 
570   // The value may have changed - recompute ValueVT.
571   ValueVT = Val.getValueType();
572   assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
573          "Failed to tile the value with PartVT!");
574 
575   if (NumParts == 1) {
576     if (PartEVT != ValueVT) {
577       diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
578                                         "scalar-to-vector conversion failed");
579       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
580     }
581 
582     Parts[0] = Val;
583     return;
584   }
585 
586   // Expand the value into multiple parts.
587   if (NumParts & (NumParts - 1)) {
588     // The number of parts is not a power of 2.  Split off and copy the tail.
589     assert(PartVT.isInteger() && ValueVT.isInteger() &&
590            "Do not know what to expand to!");
591     unsigned RoundParts = llvm::bit_floor(NumParts);
592     unsigned RoundBits = RoundParts * PartBits;
593     unsigned OddParts = NumParts - RoundParts;
594     SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
595       DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));
596 
597     getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
598                    CallConv);
599 
600     if (DAG.getDataLayout().isBigEndian())
601       // The odd parts were reversed by getCopyToParts - unreverse them.
602       std::reverse(Parts + RoundParts, Parts + NumParts);
603 
604     NumParts = RoundParts;
605     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
606     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
607   }
608 
609   // The number of parts is a power of 2.  Repeatedly bisect the value using
610   // EXTRACT_ELEMENT.
611   Parts[0] = DAG.getNode(ISD::BITCAST, DL,
612                          EVT::getIntegerVT(*DAG.getContext(),
613                                            ValueVT.getSizeInBits()),
614                          Val);
615 
616   for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
617     for (unsigned i = 0; i < NumParts; i += StepSize) {
618       unsigned ThisBits = StepSize * PartBits / 2;
619       EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
620       SDValue &Part0 = Parts[i];
621       SDValue &Part1 = Parts[i+StepSize/2];
622 
623       Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
624                           ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
625       Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
626                           ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
627 
628       if (ThisBits == PartBits && ThisVT != PartVT) {
629         Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
630         Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
631       }
632     }
633   }
634 
635   if (DAG.getDataLayout().isBigEndian())
636     std::reverse(Parts, Parts + OrigNumParts);
637 }
638 
639 static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
640                                      const SDLoc &DL, EVT PartVT) {
641   if (!PartVT.isVector())
642     return SDValue();
643 
644   EVT ValueVT = Val.getValueType();
645   EVT PartEVT = PartVT.getVectorElementType();
646   EVT ValueEVT = ValueVT.getVectorElementType();
647   ElementCount PartNumElts = PartVT.getVectorElementCount();
648   ElementCount ValueNumElts = ValueVT.getVectorElementCount();
649 
650   // We only support widening vectors with equivalent element types and
651   // fixed/scalable properties. If a target needs to widen a fixed-length type
652   // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
653   if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
654       PartNumElts.isScalable() != ValueNumElts.isScalable())
655     return SDValue();
656 
657   // Have a try for bf16 because some targets share its ABI with fp16.
658   if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
659     assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
660            "Cannot widen to illegal type");
661     Val = DAG.getNode(ISD::BITCAST, DL,
662                       ValueVT.changeVectorElementType(MVT::f16), Val);
663   } else if (PartEVT != ValueEVT) {
664     return SDValue();
665   }
666 
667   // Widening a scalable vector to another scalable vector is done by inserting
668   // the vector into a larger undef one.
669   if (PartNumElts.isScalable())
670     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
671                        Val, DAG.getVectorIdxConstant(0, DL));
672 
673   // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
674   // undef elements.
675   SmallVector<SDValue, 16> Ops;
676   DAG.ExtractVectorElements(Val, Ops);
677   SDValue EltUndef = DAG.getUNDEF(PartEVT);
678   Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
679 
680   // FIXME: Use CONCAT for 2x -> 4x.
681   return DAG.getBuildVector(PartVT, DL, Ops);
682 }
683 
684 /// getCopyToPartsVector - Create a series of nodes that contain the specified
685 /// value split into legal parts.
686 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
687                                  SDValue Val, SDValue *Parts, unsigned NumParts,
688                                  MVT PartVT, const Value *V,
689                                  std::optional<CallingConv::ID> CallConv) {
690   EVT ValueVT = Val.getValueType();
691   assert(ValueVT.isVector() && "Not a vector");
692   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
693   const bool IsABIRegCopy = CallConv.has_value();
694 
695   if (NumParts == 1) {
696     EVT PartEVT = PartVT;
697     if (PartEVT == ValueVT) {
698       // Nothing to do.
699     } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
700       // Bitconvert vector->vector case.
701       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
702     } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
703       Val = Widened;
704     } else if (PartVT.isVector() &&
705                PartEVT.getVectorElementType().bitsGE(
706                    ValueVT.getVectorElementType()) &&
707                PartEVT.getVectorElementCount() ==
708                    ValueVT.getVectorElementCount()) {
709 
710       // Promoted vector extract
711       Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
712     } else if (PartEVT.isVector() &&
713                PartEVT.getVectorElementType() !=
714                    ValueVT.getVectorElementType() &&
715                TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
716                    TargetLowering::TypeWidenVector) {
717       // Combination of widening and promotion.
718       EVT WidenVT =
719           EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
720                            PartVT.getVectorElementCount());
721       SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
722       Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
723     } else {
724       // Don't extract an integer from a float vector. This can happen if the
725       // FP type gets softened to integer and then promoted. The promotion
726       // prevents it from being picked up by the earlier bitcast case.
727       if (ValueVT.getVectorElementCount().isScalar() &&
728           (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
729         Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
730                           DAG.getVectorIdxConstant(0, DL));
731       } else {
732         uint64_t ValueSize = ValueVT.getFixedSizeInBits();
733         assert(PartVT.getFixedSizeInBits() > ValueSize &&
734                "lossy conversion of vector to scalar type");
735         EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
736         Val = DAG.getBitcast(IntermediateType, Val);
737         Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
738       }
739     }
740 
741     assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
742     Parts[0] = Val;
743     return;
744   }
745 
746   // Handle a multi-element vector.
747   EVT IntermediateVT;
748   MVT RegisterVT;
749   unsigned NumIntermediates;
750   unsigned NumRegs;
751   if (IsABIRegCopy) {
752     NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
753         *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
754         RegisterVT);
755   } else {
756     NumRegs =
757         TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
758                                    NumIntermediates, RegisterVT);
759   }
760 
761   assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
762   NumParts = NumRegs; // Silence a compiler warning.
763   assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
764 
765   assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
766          "Mixing scalable and fixed vectors when copying in parts");
767 
768   std::optional<ElementCount> DestEltCnt;
769 
770   if (IntermediateVT.isVector())
771     DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
772   else
773     DestEltCnt = ElementCount::getFixed(NumIntermediates);
774 
775   EVT BuiltVectorTy = EVT::getVectorVT(
776       *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);
777 
778   if (ValueVT == BuiltVectorTy) {
779     // Nothing to do.
780   } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
781     // Bitconvert vector->vector case.
782     Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
783   } else {
784     if (BuiltVectorTy.getVectorElementType().bitsGT(
785             ValueVT.getVectorElementType())) {
786       // Integer promotion.
787       ValueVT = EVT::getVectorVT(*DAG.getContext(),
788                                  BuiltVectorTy.getVectorElementType(),
789                                  ValueVT.getVectorElementCount());
790       Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
791     }
792 
793     if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
794       Val = Widened;
795     }
796   }
797 
798   assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");
799 
800   // Split the vector into intermediate operands.
801   SmallVector<SDValue, 8> Ops(NumIntermediates);
802   for (unsigned i = 0; i != NumIntermediates; ++i) {
803     if (IntermediateVT.isVector()) {
804       // This does something sensible for scalable vectors - see the
805       // definition of EXTRACT_SUBVECTOR for further details.
806       unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
807       Ops[i] =
808           DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
809                       DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
810     } else {
811       Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
812                            DAG.getVectorIdxConstant(i, DL));
813     }
814   }
815 
816   // Split the intermediate operands into legal parts.
817   if (NumParts == NumIntermediates) {
818     // If the register was not expanded, promote or copy the value,
819     // as appropriate.
820     for (unsigned i = 0; i != NumParts; ++i)
821       getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
822   } else if (NumParts > 0) {
823     // If the intermediate type was expanded, split each the value into
824     // legal parts.
825     assert(NumIntermediates != 0 && "division by zero");
826     assert(NumParts % NumIntermediates == 0 &&
827            "Must expand into a divisible number of parts!");
828     unsigned Factor = NumParts / NumIntermediates;
829     for (unsigned i = 0; i != NumIntermediates; ++i)
830       getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
831                      CallConv);
832   }
833 }
834 
835 RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
836                            EVT valuevt, std::optional<CallingConv::ID> CC)
837     : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
838       RegCount(1, regs.size()), CallConv(CC) {}
839 
840 RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
841                            const DataLayout &DL, unsigned Reg, Type *Ty,
842                            std::optional<CallingConv::ID> CC) {
843   ComputeValueVTs(TLI, DL, Ty, ValueVTs);
844 
845   CallConv = CC;
846 
847   for (EVT ValueVT : ValueVTs) {
848     unsigned NumRegs =
849         isABIMangled()
850             ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
851             : TLI.getNumRegisters(Context, ValueVT);
852     MVT RegisterVT =
853         isABIMangled()
854             ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
855             : TLI.getRegisterType(Context, ValueVT);
856     for (unsigned i = 0; i != NumRegs; ++i)
857       Regs.push_back(Reg + i);
858     RegVTs.push_back(RegisterVT);
859     RegCount.push_back(NumRegs);
860     Reg += NumRegs;
861   }
862 }
863 
864 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
865                                       FunctionLoweringInfo &FuncInfo,
866                                       const SDLoc &dl, SDValue &Chain,
867                                       SDValue *Glue, const Value *V) const {
868   // A Value with type {} or [0 x %t] needs no registers.
869   if (ValueVTs.empty())
870     return SDValue();
871 
872   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
873 
874   // Assemble the legal parts into the final values.
875   SmallVector<SDValue, 4> Values(ValueVTs.size());
876   SmallVector<SDValue, 8> Parts;
877   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
878     // Copy the legal parts from the registers.
879     EVT ValueVT = ValueVTs[Value];
880     unsigned NumRegs = RegCount[Value];
881     MVT RegisterVT = isABIMangled()
882                          ? TLI.getRegisterTypeForCallingConv(
883                                *DAG.getContext(), *CallConv, RegVTs[Value])
884                          : RegVTs[Value];
885 
886     Parts.resize(NumRegs);
887     for (unsigned i = 0; i != NumRegs; ++i) {
888       SDValue P;
889       if (!Glue) {
890         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
891       } else {
892         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
893         *Glue = P.getValue(2);
894       }
895 
896       Chain = P.getValue(1);
897       Parts[i] = P;
898 
899       // If the source register was virtual and if we know something about it,
900       // add an assert node.
901       if (!Register::isVirtualRegister(Regs[Part + i]) ||
902           !RegisterVT.isInteger())
903         continue;
904 
905       const FunctionLoweringInfo::LiveOutInfo *LOI =
906         FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
907       if (!LOI)
908         continue;
909 
910       unsigned RegSize = RegisterVT.getScalarSizeInBits();
911       unsigned NumSignBits = LOI->NumSignBits;
912       unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
913 
914       if (NumZeroBits == RegSize) {
915         // The current value is a zero.
916         // Explicitly express that as it would be easier for
917         // optimizations to kick in.
918         Parts[i] = DAG.getConstant(0, dl, RegisterVT);
919         continue;
920       }
921 
922       // FIXME: We capture more information than the dag can represent.  For
923       // now, just use the tightest assertzext/assertsext possible.
924       bool isSExt;
925       EVT FromVT(MVT::Other);
926       if (NumZeroBits) {
927         FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
928         isSExt = false;
929       } else if (NumSignBits > 1) {
930         FromVT =
931             EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
932         isSExt = true;
933       } else {
934         continue;
935       }
936       // Add an assertion node.
937       assert(FromVT != MVT::Other);
938       Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
939                              RegisterVT, P, DAG.getValueType(FromVT));
940     }
941 
942     Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
943                                      RegisterVT, ValueVT, V, Chain, CallConv);
944     Part += NumRegs;
945     Parts.clear();
946   }
947 
948   return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
949 }
950 
951 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
952                                  const SDLoc &dl, SDValue &Chain, SDValue *Glue,
953                                  const Value *V,
954                                  ISD::NodeType PreferredExtendType) const {
955   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
956   ISD::NodeType ExtendKind = PreferredExtendType;
957 
958   // Get the list of the values's legal parts.
959   unsigned NumRegs = Regs.size();
960   SmallVector<SDValue, 8> Parts(NumRegs);
961   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
962     unsigned NumParts = RegCount[Value];
963 
964     MVT RegisterVT = isABIMangled()
965                          ? TLI.getRegisterTypeForCallingConv(
966                                *DAG.getContext(), *CallConv, RegVTs[Value])
967                          : RegVTs[Value];
968 
969     if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
970       ExtendKind = ISD::ZERO_EXTEND;
971 
972     getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
973                    NumParts, RegisterVT, V, CallConv, ExtendKind);
974     Part += NumParts;
975   }
976 
977   // Copy the parts into the registers.
978   SmallVector<SDValue, 8> Chains(NumRegs);
979   for (unsigned i = 0; i != NumRegs; ++i) {
980     SDValue Part;
981     if (!Glue) {
982       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
983     } else {
984       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
985       *Glue = Part.getValue(1);
986     }
987 
988     Chains[i] = Part.getValue(0);
989   }
990 
991   if (NumRegs == 1 || Glue)
992     // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
993     // flagged to it. That is the CopyToReg nodes and the user are considered
994     // a single scheduling unit. If we create a TokenFactor and return it as
995     // chain, then the TokenFactor is both a predecessor (operand) of the
996     // user as well as a successor (the TF operands are flagged to the user).
997     // c1, f1 = CopyToReg
998     // c2, f2 = CopyToReg
999     // c3     = TokenFactor c1, c2
1000     // ...
1001     //        = op c3, ..., f2
1002     Chain = Chains[NumRegs-1];
1003   else
1004     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
1005 }
1006 
1007 void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
1008                                         unsigned MatchingIdx, const SDLoc &dl,
1009                                         SelectionDAG &DAG,
1010                                         std::vector<SDValue> &Ops) const {
1011   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1012 
1013   InlineAsm::Flag Flag(Code, Regs.size());
1014   if (HasMatching)
1015     Flag.setMatchingOp(MatchingIdx);
1016   else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
1017     // Put the register class of the virtual registers in the flag word.  That
1018     // way, later passes can recompute register class constraints for inline
1019     // assembly as well as normal instructions.
1020     // Don't do this for tied operands that can use the regclass information
1021     // from the def.
1022     const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1023     const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
1024     Flag.setRegClass(RC->getID());
1025   }
1026 
1027   SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
1028   Ops.push_back(Res);
1029 
1030   if (Code == InlineAsm::Kind::Clobber) {
1031     // Clobbers should always have a 1:1 mapping with registers, and may
1032     // reference registers that have illegal (e.g. vector) types. Hence, we
1033     // shouldn't try to apply any sort of splitting logic to them.
1034     assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
1035            "No 1:1 mapping from clobbers to regs?");
1036     Register SP = TLI.getStackPointerRegisterToSaveRestore();
1037     (void)SP;
1038     for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
1039       Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
1040       assert(
1041           (Regs[I] != SP ||
1042            DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
1043           "If we clobbered the stack pointer, MFI should know about it.");
1044     }
1045     return;
1046   }
1047 
1048   for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
1049     MVT RegisterVT = RegVTs[Value];
1050     unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
1051                                            RegisterVT);
1052     for (unsigned i = 0; i != NumRegs; ++i) {
1053       assert(Reg < Regs.size() && "Mismatch in # registers expected");
1054       unsigned TheReg = Regs[Reg++];
1055       Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
1056     }
1057   }
1058 }
1059 
1060 SmallVector<std::pair<unsigned, TypeSize>, 4>
1061 RegsForValue::getRegsAndSizes() const {
1062   SmallVector<std::pair<unsigned, TypeSize>, 4> OutVec;
1063   unsigned I = 0;
1064   for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
1065     unsigned RegCount = std::get<0>(CountAndVT);
1066     MVT RegisterVT = std::get<1>(CountAndVT);
1067     TypeSize RegisterSize = RegisterVT.getSizeInBits();
1068     for (unsigned E = I + RegCount; I != E; ++I)
1069       OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
1070   }
1071   return OutVec;
1072 }
1073 
1074 void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
1075                                AssumptionCache *ac,
1076                                const TargetLibraryInfo *li) {
1077   AA = aa;
1078   AC = ac;
1079   GFI = gfi;
1080   LibInfo = li;
1081   Context = DAG.getContext();
1082   LPadToCallSiteMap.clear();
1083   SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
1084   AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
1085       *DAG.getMachineFunction().getFunction().getParent());
1086 }
1087 
1088 void SelectionDAGBuilder::clear() {
1089   NodeMap.clear();
1090   UnusedArgNodeMap.clear();
1091   PendingLoads.clear();
1092   PendingExports.clear();
1093   PendingConstrainedFP.clear();
1094   PendingConstrainedFPStrict.clear();
1095   CurInst = nullptr;
1096   HasTailCall = false;
1097   SDNodeOrder = LowestSDNodeOrder;
1098   StatepointLowering.clear();
1099 }
1100 
1101 void SelectionDAGBuilder::clearDanglingDebugInfo() {
1102   DanglingDebugInfoMap.clear();
1103 }
1104 
1105 // Update DAG root to include dependencies on Pending chains.
1106 SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
1107   SDValue Root = DAG.getRoot();
1108 
1109   if (Pending.empty())
1110     return Root;
1111 
1112   // Add current root to PendingChains, unless we already indirectly
1113   // depend on it.
1114   if (Root.getOpcode() != ISD::EntryToken) {
1115     unsigned i = 0, e = Pending.size();
1116     for (; i != e; ++i) {
1117       assert(Pending[i].getNode()->getNumOperands() > 1);
1118       if (Pending[i].getNode()->getOperand(0) == Root)
1119         break;  // Don't add the root if we already indirectly depend on it.
1120     }
1121 
1122     if (i == e)
1123       Pending.push_back(Root);
1124   }
1125 
1126   if (Pending.size() == 1)
1127     Root = Pending[0];
1128   else
1129     Root = DAG.getTokenFactor(getCurSDLoc(), Pending);
1130 
1131   DAG.setRoot(Root);
1132   Pending.clear();
1133   return Root;
1134 }
1135 
1136 SDValue SelectionDAGBuilder::getMemoryRoot() {
1137   return updateRoot(PendingLoads);
1138 }
1139 
1140 SDValue SelectionDAGBuilder::getRoot() {
1141   // Chain up all pending constrained intrinsics together with all
1142   // pending loads, by simply appending them to PendingLoads and
1143   // then calling getMemoryRoot().
1144   PendingLoads.reserve(PendingLoads.size() +
1145                        PendingConstrainedFP.size() +
1146                        PendingConstrainedFPStrict.size());
1147   PendingLoads.append(PendingConstrainedFP.begin(),
1148                       PendingConstrainedFP.end());
1149   PendingLoads.append(PendingConstrainedFPStrict.begin(),
1150                       PendingConstrainedFPStrict.end());
1151   PendingConstrainedFP.clear();
1152   PendingConstrainedFPStrict.clear();
1153   return getMemoryRoot();
1154 }
1155 
1156 SDValue SelectionDAGBuilder::getControlRoot() {
1157   // We need to emit pending fpexcept.strict constrained intrinsics,
1158   // so append them to the PendingExports list.
1159   PendingExports.append(PendingConstrainedFPStrict.begin(),
1160                         PendingConstrainedFPStrict.end());
1161   PendingConstrainedFPStrict.clear();
1162   return updateRoot(PendingExports);
1163 }
1164 
1165 void SelectionDAGBuilder::handleDebugDeclare(Value *Address,
1166                                              DILocalVariable *Variable,
1167                                              DIExpression *Expression,
1168                                              DebugLoc DL) {
1169   assert(Variable && "Missing variable");
1170 
1171   // Check if address has undef value.
1172   if (!Address || isa<UndefValue>(Address) ||
1173       (Address->use_empty() && !isa<Argument>(Address))) {
1174     LLVM_DEBUG(
1175         dbgs()
1176         << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1177     return;
1178   }
1179 
1180   bool IsParameter = Variable->isParameter() || isa<Argument>(Address);
1181 
1182   SDValue &N = NodeMap[Address];
1183   if (!N.getNode() && isa<Argument>(Address))
1184     // Check unused arguments map.
1185     N = UnusedArgNodeMap[Address];
1186   SDDbgValue *SDV;
1187   if (N.getNode()) {
1188     if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
1189       Address = BCI->getOperand(0);
1190     // Parameters are handled specially.
1191     auto *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
1192     if (IsParameter && FINode) {
1193       // Byval parameter. We have a frame index at this point.
1194       SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
1195                                       /*IsIndirect*/ true, DL, SDNodeOrder);
1196     } else if (isa<Argument>(Address)) {
1197       // Address is an argument, so try to emit its dbg value using
1198       // virtual register info from the FuncInfo.ValueMap.
1199       EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
1200                                FuncArgumentDbgValueKind::Declare, N);
1201       return;
1202     } else {
1203       SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
1204                             true, DL, SDNodeOrder);
1205     }
1206     DAG.AddDbgValue(SDV, IsParameter);
1207   } else {
1208     // If Address is an argument then try to emit its dbg value using
1209     // virtual register info from the FuncInfo.ValueMap.
1210     if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
1211                                   FuncArgumentDbgValueKind::Declare, N)) {
1212       LLVM_DEBUG(dbgs() << "dbg_declare: Dropping debug info"
1213                         << " (could not emit func-arg dbg_value)\n");
1214     }
1215   }
1216   return;
1217 }
1218 
1219 void SelectionDAGBuilder::visitDbgInfo(const Instruction &I) {
1220   // Add SDDbgValue nodes for any var locs here. Do so before updating
1221   // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
1222   if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
1223     // Add SDDbgValue nodes for any var locs here. Do so before updating
1224     // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
1225     for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
1226          It != End; ++It) {
1227       auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1228       dropDanglingDebugInfo(Var, It->Expr);
1229       if (It->Values.isKillLocation(It->Expr)) {
1230         handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
1231         continue;
1232       }
1233       SmallVector<Value *> Values(It->Values.location_ops());
1234       if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
1235                             It->Values.hasArgList())) {
1236         SmallVector<Value *, 4> Vals;
1237         for (Value *V : It->Values.location_ops())
1238           Vals.push_back(V);
1239         addDanglingDebugInfo(Vals,
1240                              FnVarLocs->getDILocalVariable(It->VariableID),
1241                              It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
1242       }
1243     }
1244   }
1245 
1246   // We must skip DPValues if they've already been processed above as we
1247   // have just emitted the debug values resulting from assignment tracking
1248   // analysis, making any existing DPValues redundant (and probably less
1249   // correct). We still need to process DPLabels. This does sink DPLabels
1250   // to the bottom of the group of debug records. That sholdn't be important
1251   // as it does so deterministcally and ordering between DPLabels and DPValues
1252   // is immaterial (other than for MIR/IR printing).
1253   bool SkipDPValues = DAG.getFunctionVarLocs();
1254   // Is there is any debug-info attached to this instruction, in the form of
1255   // DbgRecord non-instruction debug-info records.
1256   for (DbgRecord &DR : I.getDbgValueRange()) {
1257     if (DPLabel *DPL = dyn_cast<DPLabel>(&DR)) {
1258       assert(DPL->getLabel() && "Missing label");
1259       SDDbgLabel *SDV =
1260           DAG.getDbgLabel(DPL->getLabel(), DPL->getDebugLoc(), SDNodeOrder);
1261       DAG.AddDbgLabel(SDV);
1262       continue;
1263     }
1264 
1265     if (SkipDPValues)
1266       continue;
1267     DPValue &DPV = cast<DPValue>(DR);
1268     DILocalVariable *Variable = DPV.getVariable();
1269     DIExpression *Expression = DPV.getExpression();
1270     dropDanglingDebugInfo(Variable, Expression);
1271 
1272     if (DPV.getType() == DPValue::LocationType::Declare) {
1273       if (FuncInfo.PreprocessedDPVDeclares.contains(&DPV))
1274         continue;
1275       LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DPV
1276                         << "\n");
1277       handleDebugDeclare(DPV.getVariableLocationOp(0), Variable, Expression,
1278                          DPV.getDebugLoc());
1279       continue;
1280     }
1281 
1282     // A DPValue with no locations is a kill location.
1283     SmallVector<Value *, 4> Values(DPV.location_ops());
1284     if (Values.empty()) {
1285       handleKillDebugValue(Variable, Expression, DPV.getDebugLoc(),
1286                            SDNodeOrder);
1287       continue;
1288     }
1289 
1290     // A DPValue with an undef or absent location is also a kill location.
1291     if (llvm::any_of(Values,
1292                      [](Value *V) { return !V || isa<UndefValue>(V); })) {
1293       handleKillDebugValue(Variable, Expression, DPV.getDebugLoc(),
1294                            SDNodeOrder);
1295       continue;
1296     }
1297 
1298     bool IsVariadic = DPV.hasArgList();
1299     if (!handleDebugValue(Values, Variable, Expression, DPV.getDebugLoc(),
1300                           SDNodeOrder, IsVariadic)) {
1301       addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
1302                            DPV.getDebugLoc(), SDNodeOrder);
1303     }
1304   }
1305 }
1306 
1307 void SelectionDAGBuilder::visit(const Instruction &I) {
1308   visitDbgInfo(I);
1309 
1310   // Set up outgoing PHI node register values before emitting the terminator.
1311   if (I.isTerminator()) {
1312     HandlePHINodesInSuccessorBlocks(I.getParent());
1313   }
1314 
1315   // Increase the SDNodeOrder if dealing with a non-debug instruction.
1316   if (!isa<DbgInfoIntrinsic>(I))
1317     ++SDNodeOrder;
1318 
1319   CurInst = &I;
1320 
1321   // Set inserted listener only if required.
1322   bool NodeInserted = false;
1323   std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1324   MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
1325   if (PCSectionsMD) {
1326     InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1327         DAG, [&](SDNode *) { NodeInserted = true; });
1328   }
1329 
1330   visit(I.getOpcode(), I);
1331 
1332   if (!I.isTerminator() && !HasTailCall &&
1333       !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
1334     CopyToExportRegsIfNeeded(&I);
1335 
1336   // Handle metadata.
1337   if (PCSectionsMD) {
1338     auto It = NodeMap.find(&I);
1339     if (It != NodeMap.end()) {
1340       DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1341     } else if (NodeInserted) {
1342       // This should not happen; if it does, don't let it go unnoticed so we can
1343       // fix it. Relevant visit*() function is probably missing a setValue().
1344       errs() << "warning: loosing !pcsections metadata ["
1345              << I.getModule()->getName() << "]\n";
1346       LLVM_DEBUG(I.dump());
1347       assert(false);
1348     }
1349   }
1350 
1351   CurInst = nullptr;
1352 }
1353 
1354 void SelectionDAGBuilder::visitPHI(const PHINode &) {
1355   llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1356 }
1357 
1358 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1359   // Note: this doesn't use InstVisitor, because it has to work with
1360   // ConstantExpr's in addition to instructions.
1361   switch (Opcode) {
1362   default: llvm_unreachable("Unknown instruction type encountered!");
1363     // Build the switch statement using the Instruction.def file.
1364 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1365     case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1366 #include "llvm/IR/Instruction.def"
1367   }
1368 }
1369 
1370 static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
1371                                             DILocalVariable *Variable,
1372                                             DebugLoc DL, unsigned Order,
1373                                             SmallVectorImpl<Value *> &Values,
1374                                             DIExpression *Expression) {
1375   // For variadic dbg_values we will now insert an undef.
1376   // FIXME: We can potentially recover these!
1377   SmallVector<SDDbgOperand, 2> Locs;
1378   for (const Value *V : Values) {
1379     auto *Undef = UndefValue::get(V->getType());
1380     Locs.push_back(SDDbgOperand::fromConst(Undef));
1381   }
1382   SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
1383                                         /*IsIndirect=*/false, DL, Order,
1384                                         /*IsVariadic=*/true);
1385   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1386   return true;
1387 }
1388 
1389 void SelectionDAGBuilder::addDanglingDebugInfo(SmallVectorImpl<Value *> &Values,
1390                                                DILocalVariable *Var,
1391                                                DIExpression *Expr,
1392                                                bool IsVariadic, DebugLoc DL,
1393                                                unsigned Order) {
1394   if (IsVariadic) {
1395     handleDanglingVariadicDebugInfo(DAG, Var, DL, Order, Values, Expr);
1396     return;
1397   }
1398   // TODO: Dangling debug info will eventually either be resolved or produce
1399   // an Undef DBG_VALUE. However in the resolution case, a gap may appear
1400   // between the original dbg.value location and its resolved DBG_VALUE,
1401   // which we should ideally fill with an extra Undef DBG_VALUE.
1402   assert(Values.size() == 1);
1403   DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);
1404 }
1405 
1406 void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1407                                                 const DIExpression *Expr) {
1408   auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1409     DIVariable *DanglingVariable = DDI.getVariable();
1410     DIExpression *DanglingExpr = DDI.getExpression();
1411     if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1412       LLVM_DEBUG(dbgs() << "Dropping dangling debug info for "
1413                         << printDDI(nullptr, DDI) << "\n");
1414       return true;
1415     }
1416     return false;
1417   };
1418 
1419   for (auto &DDIMI : DanglingDebugInfoMap) {
1420     DanglingDebugInfoVector &DDIV = DDIMI.second;
1421 
1422     // If debug info is to be dropped, run it through final checks to see
1423     // whether it can be salvaged.
1424     for (auto &DDI : DDIV)
1425       if (isMatchingDbgValue(DDI))
1426         salvageUnresolvedDbgValue(DDIMI.first, DDI);
1427 
1428     erase_if(DDIV, isMatchingDbgValue);
1429   }
1430 }
1431 
1432 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1433 // generate the debug data structures now that we've seen its definition.
1434 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1435                                                    SDValue Val) {
1436   auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1437   if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1438     return;
1439 
1440   DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1441   for (auto &DDI : DDIV) {
1442     DebugLoc DL = DDI.getDebugLoc();
1443     unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1444     unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1445     DILocalVariable *Variable = DDI.getVariable();
1446     DIExpression *Expr = DDI.getExpression();
1447     assert(Variable->isValidLocationForIntrinsic(DL) &&
1448            "Expected inlined-at fields to agree");
1449     SDDbgValue *SDV;
1450     if (Val.getNode()) {
1451       // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1452       // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1453       // we couldn't resolve it directly when examining the DbgValue intrinsic
1454       // in the first place we should not be more successful here). Unless we
1455       // have some test case that prove this to be correct we should avoid
1456       // calling EmitFuncArgumentDbgValue here.
1457       if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
1458                                     FuncArgumentDbgValueKind::Value, Val)) {
1459         LLVM_DEBUG(dbgs() << "Resolve dangling debug info for "
1460                           << printDDI(V, DDI) << "\n");
1461         LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
1462         // Increase the SDNodeOrder for the DbgValue here to make sure it is
1463         // inserted after the definition of Val when emitting the instructions
1464         // after ISel. An alternative could be to teach
1465         // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1466         LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1467                    << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1468                    << ValSDNodeOrder << "\n");
1469         SDV = getDbgValue(Val, Variable, Expr, DL,
1470                           std::max(DbgSDNodeOrder, ValSDNodeOrder));
1471         DAG.AddDbgValue(SDV, false);
1472       } else
1473         LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
1474                           << printDDI(V, DDI)
1475                           << " in EmitFuncArgumentDbgValue\n");
1476     } else {
1477       LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)
1478                         << "\n");
1479       auto Undef = UndefValue::get(V->getType());
1480       auto SDV =
1481           DAG.getConstantDbgValue(Variable, Expr, Undef, DL, DbgSDNodeOrder);
1482       DAG.AddDbgValue(SDV, false);
1483     }
1484   }
1485   DDIV.clear();
1486 }
1487 
1488 void SelectionDAGBuilder::salvageUnresolvedDbgValue(const Value *V,
1489                                                     DanglingDebugInfo &DDI) {
1490   // TODO: For the variadic implementation, instead of only checking the fail
1491   // state of `handleDebugValue`, we need know specifically which values were
1492   // invalid, so that we attempt to salvage only those values when processing
1493   // a DIArgList.
1494   const Value *OrigV = V;
1495   DILocalVariable *Var = DDI.getVariable();
1496   DIExpression *Expr = DDI.getExpression();
1497   DebugLoc DL = DDI.getDebugLoc();
1498   unsigned SDOrder = DDI.getSDNodeOrder();
1499 
1500   // Currently we consider only dbg.value intrinsics -- we tell the salvager
1501   // that DW_OP_stack_value is desired.
1502   bool StackValue = true;
1503 
1504   // Can this Value can be encoded without any further work?
1505   if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
1506     return;
1507 
1508   // Attempt to salvage back through as many instructions as possible. Bail if
1509   // a non-instruction is seen, such as a constant expression or global
1510   // variable. FIXME: Further work could recover those too.
1511   while (isa<Instruction>(V)) {
1512     const Instruction &VAsInst = *cast<const Instruction>(V);
1513     // Temporary "0", awaiting real implementation.
1514     SmallVector<uint64_t, 16> Ops;
1515     SmallVector<Value *, 4> AdditionalValues;
1516     V = salvageDebugInfoImpl(const_cast<Instruction &>(VAsInst),
1517                              Expr->getNumLocationOperands(), Ops,
1518                              AdditionalValues);
1519     // If we cannot salvage any further, and haven't yet found a suitable debug
1520     // expression, bail out.
1521     if (!V)
1522       break;
1523 
1524     // TODO: If AdditionalValues isn't empty, then the salvage can only be
1525     // represented with a DBG_VALUE_LIST, so we give up. When we have support
1526     // here for variadic dbg_values, remove that condition.
1527     if (!AdditionalValues.empty())
1528       break;
1529 
1530     // New value and expr now represent this debuginfo.
1531     Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1532 
1533     // Some kind of simplification occurred: check whether the operand of the
1534     // salvaged debug expression can be encoded in this DAG.
1535     if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
1536       LLVM_DEBUG(
1537           dbgs() << "Salvaged debug location info for:\n  " << *Var << "\n"
1538                  << *OrigV << "\nBy stripping back to:\n  " << *V << "\n");
1539       return;
1540     }
1541   }
1542 
1543   // This was the final opportunity to salvage this debug information, and it
1544   // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1545   // any earlier variable location.
1546   assert(OrigV && "V shouldn't be null");
1547   auto *Undef = UndefValue::get(OrigV->getType());
1548   auto *SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1549   DAG.AddDbgValue(SDV, false);
1550   LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  "
1551                     << printDDI(OrigV, DDI) << "\n");
1552 }
1553 
1554 void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var,
1555                                                DIExpression *Expr,
1556                                                DebugLoc DbgLoc,
1557                                                unsigned Order) {
1558   Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context));
1559   DIExpression *NewExpr =
1560       const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr));
1561   handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
1562                    /*IsVariadic*/ false);
1563 }
1564 
1565 bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
1566                                            DILocalVariable *Var,
1567                                            DIExpression *Expr, DebugLoc DbgLoc,
1568                                            unsigned Order, bool IsVariadic) {
1569   if (Values.empty())
1570     return true;
1571 
1572   // Filter EntryValue locations out early.
1573   if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1574     return true;
1575 
1576   SmallVector<SDDbgOperand> LocationOps;
1577   SmallVector<SDNode *> Dependencies;
1578   for (const Value *V : Values) {
1579     // Constant value.
1580     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1581         isa<ConstantPointerNull>(V)) {
1582       LocationOps.emplace_back(SDDbgOperand::fromConst(V));
1583       continue;
1584     }
1585 
1586     // Look through IntToPtr constants.
1587     if (auto *CE = dyn_cast<ConstantExpr>(V))
1588       if (CE->getOpcode() == Instruction::IntToPtr) {
1589         LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
1590         continue;
1591       }
1592 
1593     // If the Value is a frame index, we can create a FrameIndex debug value
1594     // without relying on the DAG at all.
1595     if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1596       auto SI = FuncInfo.StaticAllocaMap.find(AI);
1597       if (SI != FuncInfo.StaticAllocaMap.end()) {
1598         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1599         continue;
1600       }
1601     }
1602 
1603     // Do not use getValue() in here; we don't want to generate code at
1604     // this point if it hasn't been done yet.
1605     SDValue N = NodeMap[V];
1606     if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1607       N = UnusedArgNodeMap[V];
1608     if (N.getNode()) {
1609       // Only emit func arg dbg value for non-variadic dbg.values for now.
1610       if (!IsVariadic &&
1611           EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1612                                    FuncArgumentDbgValueKind::Value, N))
1613         return true;
1614       if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
1615         // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
1616         // describe stack slot locations.
1617         //
1618         // Consider "int x = 0; int *px = &x;". There are two kinds of
1619         // interesting debug values here after optimization:
1620         //
1621         //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
1622         //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1623         //
1624         // Both describe the direct values of their associated variables.
1625         Dependencies.push_back(N.getNode());
1626         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1627         continue;
1628       }
1629       LocationOps.emplace_back(
1630           SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1631       continue;
1632     }
1633 
1634     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1635     // Special rules apply for the first dbg.values of parameter variables in a
1636     // function. Identify them by the fact they reference Argument Values, that
1637     // they're parameters, and they are parameters of the current function. We
1638     // need to let them dangle until they get an SDNode.
1639     bool IsParamOfFunc =
1640         isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1641     if (IsParamOfFunc)
1642       return false;
1643 
1644     // The value is not used in this block yet (or it would have an SDNode).
1645     // We still want the value to appear for the user if possible -- if it has
1646     // an associated VReg, we can refer to that instead.
1647     auto VMI = FuncInfo.ValueMap.find(V);
1648     if (VMI != FuncInfo.ValueMap.end()) {
1649       unsigned Reg = VMI->second;
1650       // If this is a PHI node, it may be split up into several MI PHI nodes
1651       // (in FunctionLoweringInfo::set).
1652       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1653                        V->getType(), std::nullopt);
1654       if (RFV.occupiesMultipleRegs()) {
1655         // FIXME: We could potentially support variadic dbg_values here.
1656         if (IsVariadic)
1657           return false;
1658         unsigned Offset = 0;
1659         unsigned BitsToDescribe = 0;
1660         if (auto VarSize = Var->getSizeInBits())
1661           BitsToDescribe = *VarSize;
1662         if (auto Fragment = Expr->getFragmentInfo())
1663           BitsToDescribe = Fragment->SizeInBits;
1664         for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
1665           // Bail out if all bits are described already.
1666           if (Offset >= BitsToDescribe)
1667             break;
1668           // TODO: handle scalable vectors.
1669           unsigned RegisterSize = RegAndSize.second;
1670           unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1671                                       ? BitsToDescribe - Offset
1672                                       : RegisterSize;
1673           auto FragmentExpr = DIExpression::createFragmentExpression(
1674               Expr, Offset, FragmentSize);
1675           if (!FragmentExpr)
1676             continue;
1677           SDDbgValue *SDV = DAG.getVRegDbgValue(
1678               Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, SDNodeOrder);
1679           DAG.AddDbgValue(SDV, false);
1680           Offset += RegisterSize;
1681         }
1682         return true;
1683       }
1684       // We can use simple vreg locations for variadic dbg_values as well.
1685       LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1686       continue;
1687     }
1688     // We failed to create a SDDbgOperand for V.
1689     return false;
1690   }
1691 
1692   // We have created a SDDbgOperand for each Value in Values.
1693   // Should use Order instead of SDNodeOrder?
1694   assert(!LocationOps.empty());
1695   SDDbgValue *SDV = DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1696                                         /*IsIndirect=*/false, DbgLoc,
1697                                         SDNodeOrder, IsVariadic);
1698   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1699   return true;
1700 }
1701 
1702 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1703   // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1704   for (auto &Pair : DanglingDebugInfoMap)
1705     for (auto &DDI : Pair.second)
1706       salvageUnresolvedDbgValue(const_cast<Value *>(Pair.first), DDI);
1707   clearDanglingDebugInfo();
1708 }
1709 
1710 /// getCopyFromRegs - If there was virtual register allocated for the value V
1711 /// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
1712 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1713   DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1714   SDValue Result;
1715 
1716   if (It != FuncInfo.ValueMap.end()) {
1717     Register InReg = It->second;
1718 
1719     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1720                      DAG.getDataLayout(), InReg, Ty,
1721                      std::nullopt); // This is not an ABI copy.
1722     SDValue Chain = DAG.getEntryNode();
1723     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1724                                  V);
1725     resolveDanglingDebugInfo(V, Result);
1726   }
1727 
1728   return Result;
1729 }
1730 
1731 /// getValue - Return an SDValue for the given Value.
1732 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1733   // If we already have an SDValue for this value, use it. It's important
1734   // to do this first, so that we don't create a CopyFromReg if we already
1735   // have a regular SDValue.
1736   SDValue &N = NodeMap[V];
1737   if (N.getNode()) return N;
1738 
1739   // If there's a virtual register allocated and initialized for this
1740   // value, use it.
1741   if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1742     return copyFromReg;
1743 
1744   // Otherwise create a new SDValue and remember it.
1745   SDValue Val = getValueImpl(V);
1746   NodeMap[V] = Val;
1747   resolveDanglingDebugInfo(V, Val);
1748   return Val;
1749 }
1750 
1751 /// getNonRegisterValue - Return an SDValue for the given Value, but
1752 /// don't look in FuncInfo.ValueMap for a virtual register.
1753 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1754   // If we already have an SDValue for this value, use it.
1755   SDValue &N = NodeMap[V];
1756   if (N.getNode()) {
1757     if (isIntOrFPConstant(N)) {
1758       // Remove the debug location from the node as the node is about to be used
1759       // in a location which may differ from the original debug location.  This
1760       // is relevant to Constant and ConstantFP nodes because they can appear
1761       // as constant expressions inside PHI nodes.
1762       N->setDebugLoc(DebugLoc());
1763     }
1764     return N;
1765   }
1766 
1767   // Otherwise create a new SDValue and remember it.
1768   SDValue Val = getValueImpl(V);
1769   NodeMap[V] = Val;
1770   resolveDanglingDebugInfo(V, Val);
1771   return Val;
1772 }
1773 
1774 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1775 /// Create an SDValue for the given value.
1776 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1777   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1778 
1779   if (const Constant *C = dyn_cast<Constant>(V)) {
1780     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1781 
1782     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1783       return DAG.getConstant(*CI, getCurSDLoc(), VT);
1784 
1785     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1786       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1787 
1788     if (isa<ConstantPointerNull>(C)) {
1789       unsigned AS = V->getType()->getPointerAddressSpace();
1790       return DAG.getConstant(0, getCurSDLoc(),
1791                              TLI.getPointerTy(DAG.getDataLayout(), AS));
1792     }
1793 
1794     if (match(C, m_VScale()))
1795       return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1796 
1797     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1798       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1799 
1800     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1801       return DAG.getUNDEF(VT);
1802 
1803     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1804       visit(CE->getOpcode(), *CE);
1805       SDValue N1 = NodeMap[V];
1806       assert(N1.getNode() && "visit didn't populate the NodeMap!");
1807       return N1;
1808     }
1809 
1810     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1811       SmallVector<SDValue, 4> Constants;
1812       for (const Use &U : C->operands()) {
1813         SDNode *Val = getValue(U).getNode();
1814         // If the operand is an empty aggregate, there are no values.
1815         if (!Val) continue;
1816         // Add each leaf value from the operand to the Constants list
1817         // to form a flattened list of all the values.
1818         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1819           Constants.push_back(SDValue(Val, i));
1820       }
1821 
1822       return DAG.getMergeValues(Constants, getCurSDLoc());
1823     }
1824 
1825     if (const ConstantDataSequential *CDS =
1826           dyn_cast<ConstantDataSequential>(C)) {
1827       SmallVector<SDValue, 4> Ops;
1828       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1829         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1830         // Add each leaf value from the operand to the Constants list
1831         // to form a flattened list of all the values.
1832         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1833           Ops.push_back(SDValue(Val, i));
1834       }
1835 
1836       if (isa<ArrayType>(CDS->getType()))
1837         return DAG.getMergeValues(Ops, getCurSDLoc());
1838       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1839     }
1840 
1841     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1842       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1843              "Unknown struct or array constant!");
1844 
1845       SmallVector<EVT, 4> ValueVTs;
1846       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1847       unsigned NumElts = ValueVTs.size();
1848       if (NumElts == 0)
1849         return SDValue(); // empty struct
1850       SmallVector<SDValue, 4> Constants(NumElts);
1851       for (unsigned i = 0; i != NumElts; ++i) {
1852         EVT EltVT = ValueVTs[i];
1853         if (isa<UndefValue>(C))
1854           Constants[i] = DAG.getUNDEF(EltVT);
1855         else if (EltVT.isFloatingPoint())
1856           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1857         else
1858           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1859       }
1860 
1861       return DAG.getMergeValues(Constants, getCurSDLoc());
1862     }
1863 
1864     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1865       return DAG.getBlockAddress(BA, VT);
1866 
1867     if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1868       return getValue(Equiv->getGlobalValue());
1869 
1870     if (const auto *NC = dyn_cast<NoCFIValue>(C))
1871       return getValue(NC->getGlobalValue());
1872 
1873     if (VT == MVT::aarch64svcount) {
1874       assert(C->isNullValue() && "Can only zero this target type!");
1875       return DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT,
1876                          DAG.getConstant(0, getCurSDLoc(), MVT::nxv16i1));
1877     }
1878 
1879     VectorType *VecTy = cast<VectorType>(V->getType());
1880 
1881     // Now that we know the number and type of the elements, get that number of
1882     // elements into the Ops array based on what kind of constant it is.
1883     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1884       SmallVector<SDValue, 16> Ops;
1885       unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1886       for (unsigned i = 0; i != NumElements; ++i)
1887         Ops.push_back(getValue(CV->getOperand(i)));
1888 
1889       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1890     }
1891 
1892     if (isa<ConstantAggregateZero>(C)) {
1893       EVT EltVT =
1894           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1895 
1896       SDValue Op;
1897       if (EltVT.isFloatingPoint())
1898         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1899       else
1900         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1901 
1902       return NodeMap[V] = DAG.getSplat(VT, getCurSDLoc(), Op);
1903     }
1904 
1905     llvm_unreachable("Unknown vector constant");
1906   }
1907 
1908   // If this is a static alloca, generate it as the frameindex instead of
1909   // computation.
1910   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1911     DenseMap<const AllocaInst*, int>::iterator SI =
1912       FuncInfo.StaticAllocaMap.find(AI);
1913     if (SI != FuncInfo.StaticAllocaMap.end())
1914       return DAG.getFrameIndex(
1915           SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
1916   }
1917 
1918   // If this is an instruction which fast-isel has deferred, select it now.
1919   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1920     Register InReg = FuncInfo.InitializeRegForValue(Inst);
1921 
1922     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1923                      Inst->getType(), std::nullopt);
1924     SDValue Chain = DAG.getEntryNode();
1925     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1926   }
1927 
1928   if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
1929     return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1930 
1931   if (const auto *BB = dyn_cast<BasicBlock>(V))
1932     return DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
1933 
1934   llvm_unreachable("Can't get register for value!");
1935 }
1936 
1937 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1938   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1939   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1940   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1941   bool IsSEH = isAsynchronousEHPersonality(Pers);
1942   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1943   if (!IsSEH)
1944     CatchPadMBB->setIsEHScopeEntry();
1945   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1946   if (IsMSVCCXX || IsCoreCLR)
1947     CatchPadMBB->setIsEHFuncletEntry();
1948 }
1949 
1950 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1951   // Update machine-CFG edge.
1952   MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1953   FuncInfo.MBB->addSuccessor(TargetMBB);
1954   TargetMBB->setIsEHCatchretTarget(true);
1955   DAG.getMachineFunction().setHasEHCatchret(true);
1956 
1957   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1958   bool IsSEH = isAsynchronousEHPersonality(Pers);
1959   if (IsSEH) {
1960     // If this is not a fall-through branch or optimizations are switched off,
1961     // emit the branch.
1962     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1963         TM.getOptLevel() == CodeGenOptLevel::None)
1964       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1965                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1966     return;
1967   }
1968 
1969   // Figure out the funclet membership for the catchret's successor.
1970   // This will be used by the FuncletLayout pass to determine how to order the
1971   // BB's.
1972   // A 'catchret' returns to the outer scope's color.
1973   Value *ParentPad = I.getCatchSwitchParentPad();
1974   const BasicBlock *SuccessorColor;
1975   if (isa<ConstantTokenNone>(ParentPad))
1976     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1977   else
1978     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1979   assert(SuccessorColor && "No parent funclet for catchret!");
1980   MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1981   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1982 
1983   // Create the terminator node.
1984   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1985                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
1986                             DAG.getBasicBlock(SuccessorColorMBB));
1987   DAG.setRoot(Ret);
1988 }
1989 
1990 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1991   // Don't emit any special code for the cleanuppad instruction. It just marks
1992   // the start of an EH scope/funclet.
1993   FuncInfo.MBB->setIsEHScopeEntry();
1994   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1995   if (Pers != EHPersonality::Wasm_CXX) {
1996     FuncInfo.MBB->setIsEHFuncletEntry();
1997     FuncInfo.MBB->setIsCleanupFuncletEntry();
1998   }
1999 }
2000 
2001 // In wasm EH, even though a catchpad may not catch an exception if a tag does
2002 // not match, it is OK to add only the first unwind destination catchpad to the
2003 // successors, because there will be at least one invoke instruction within the
2004 // catch scope that points to the next unwind destination, if one exists, so
2005 // CFGSort cannot mess up with BB sorting order.
2006 // (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
2007 // call within them, and catchpads only consisting of 'catch (...)' have a
2008 // '__cxa_end_catch' call within them, both of which generate invokes in case
2009 // the next unwind destination exists, i.e., the next unwind destination is not
2010 // the caller.)
2011 //
2012 // Having at most one EH pad successor is also simpler and helps later
2013 // transformations.
2014 //
2015 // For example,
2016 // current:
2017 //   invoke void @foo to ... unwind label %catch.dispatch
2018 // catch.dispatch:
2019 //   %0 = catchswitch within ... [label %catch.start] unwind label %next
2020 // catch.start:
2021 //   ...
2022 //   ... in this BB or some other child BB dominated by this BB there will be an
2023 //   invoke that points to 'next' BB as an unwind destination
2024 //
2025 // next: ; We don't need to add this to 'current' BB's successor
2026 //   ...
2027 static void findWasmUnwindDestinations(
2028     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2029     BranchProbability Prob,
2030     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2031         &UnwindDests) {
2032   while (EHPadBB) {
2033     const Instruction *Pad = EHPadBB->getFirstNonPHI();
2034     if (isa<CleanupPadInst>(Pad)) {
2035       // Stop on cleanup pads.
2036       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
2037       UnwindDests.back().first->setIsEHScopeEntry();
2038       break;
2039     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2040       // Add the catchpad handlers to the possible destinations. We don't
2041       // continue to the unwind destination of the catchswitch for wasm.
2042       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2043         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
2044         UnwindDests.back().first->setIsEHScopeEntry();
2045       }
2046       break;
2047     } else {
2048       continue;
2049     }
2050   }
2051 }
2052 
2053 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
2054 /// many places it could ultimately go. In the IR, we have a single unwind
2055 /// destination, but in the machine CFG, we enumerate all the possible blocks.
2056 /// This function skips over imaginary basic blocks that hold catchswitch
2057 /// instructions, and finds all the "real" machine
2058 /// basic block destinations. As those destinations may not be successors of
2059 /// EHPadBB, here we also calculate the edge probability to those destinations.
2060 /// The passed-in Prob is the edge probability to EHPadBB.
2061 static void findUnwindDestinations(
2062     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2063     BranchProbability Prob,
2064     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2065         &UnwindDests) {
2066   EHPersonality Personality =
2067     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2068   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2069   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2070   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2071   bool IsSEH = isAsynchronousEHPersonality(Personality);
2072 
2073   if (IsWasmCXX) {
2074     findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
2075     assert(UnwindDests.size() <= 1 &&
2076            "There should be at most one unwind destination for wasm");
2077     return;
2078   }
2079 
2080   while (EHPadBB) {
2081     const Instruction *Pad = EHPadBB->getFirstNonPHI();
2082     BasicBlock *NewEHPadBB = nullptr;
2083     if (isa<LandingPadInst>(Pad)) {
2084       // Stop on landingpads. They are not funclets.
2085       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
2086       break;
2087     } else if (isa<CleanupPadInst>(Pad)) {
2088       // Stop on cleanup pads. Cleanups are always funclet entries for all known
2089       // personalities.
2090       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
2091       UnwindDests.back().first->setIsEHScopeEntry();
2092       UnwindDests.back().first->setIsEHFuncletEntry();
2093       break;
2094     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2095       // Add the catchpad handlers to the possible destinations.
2096       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2097         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
2098         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2099         if (IsMSVCCXX || IsCoreCLR)
2100           UnwindDests.back().first->setIsEHFuncletEntry();
2101         if (!IsSEH)
2102           UnwindDests.back().first->setIsEHScopeEntry();
2103       }
2104       NewEHPadBB = CatchSwitch->getUnwindDest();
2105     } else {
2106       continue;
2107     }
2108 
2109     BranchProbabilityInfo *BPI = FuncInfo.BPI;
2110     if (BPI && NewEHPadBB)
2111       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2112     EHPadBB = NewEHPadBB;
2113   }
2114 }
2115 
2116 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
2117   // Update successor info.
2118   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2119   auto UnwindDest = I.getUnwindDest();
2120   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2121   BranchProbability UnwindDestProb =
2122       (BPI && UnwindDest)
2123           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
2124           : BranchProbability::getZero();
2125   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
2126   for (auto &UnwindDest : UnwindDests) {
2127     UnwindDest.first->setIsEHPad();
2128     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2129   }
2130   FuncInfo.MBB->normalizeSuccProbs();
2131 
2132   // Create the terminator node.
2133   SDValue Ret =
2134       DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
2135   DAG.setRoot(Ret);
2136 }
2137 
2138 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
2139   report_fatal_error("visitCatchSwitch not yet implemented!");
2140 }
2141 
2142 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
2143   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2144   auto &DL = DAG.getDataLayout();
2145   SDValue Chain = getControlRoot();
2146   SmallVector<ISD::OutputArg, 8> Outs;
2147   SmallVector<SDValue, 8> OutVals;
2148 
2149   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
2150   // lower
2151   //
2152   //   %val = call <ty> @llvm.experimental.deoptimize()
2153   //   ret <ty> %val
2154   //
2155   // differently.
2156   if (I.getParent()->getTerminatingDeoptimizeCall()) {
2157     LowerDeoptimizingReturn();
2158     return;
2159   }
2160 
2161   if (!FuncInfo.CanLowerReturn) {
2162     unsigned DemoteReg = FuncInfo.DemoteRegister;
2163     const Function *F = I.getParent()->getParent();
2164 
2165     // Emit a store of the return value through the virtual register.
2166     // Leave Outs empty so that LowerReturn won't try to load return
2167     // registers the usual way.
2168     SmallVector<EVT, 1> PtrValueVTs;
2169     ComputeValueVTs(TLI, DL,
2170                     PointerType::get(F->getContext(),
2171                                      DAG.getDataLayout().getAllocaAddrSpace()),
2172                     PtrValueVTs);
2173 
2174     SDValue RetPtr =
2175         DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVTs[0]);
2176     SDValue RetOp = getValue(I.getOperand(0));
2177 
2178     SmallVector<EVT, 4> ValueVTs, MemVTs;
2179     SmallVector<uint64_t, 4> Offsets;
2180     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2181                     &Offsets, 0);
2182     unsigned NumValues = ValueVTs.size();
2183 
2184     SmallVector<SDValue, 4> Chains(NumValues);
2185     Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2186     for (unsigned i = 0; i != NumValues; ++i) {
2187       // An aggregate return value cannot wrap around the address space, so
2188       // offsets to its parts don't wrap either.
2189       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
2190                                            TypeSize::getFixed(Offsets[i]));
2191 
2192       SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
2193       if (MemVTs[i] != ValueVTs[i])
2194         Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
2195       Chains[i] = DAG.getStore(
2196           Chain, getCurSDLoc(), Val,
2197           // FIXME: better loc info would be nice.
2198           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
2199           commonAlignment(BaseAlign, Offsets[i]));
2200     }
2201 
2202     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
2203                         MVT::Other, Chains);
2204   } else if (I.getNumOperands() != 0) {
2205     SmallVector<EVT, 4> ValueVTs;
2206     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
2207     unsigned NumValues = ValueVTs.size();
2208     if (NumValues) {
2209       SDValue RetOp = getValue(I.getOperand(0));
2210 
2211       const Function *F = I.getParent()->getParent();
2212 
2213       bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
2214           I.getOperand(0)->getType(), F->getCallingConv(),
2215           /*IsVarArg*/ false, DL);
2216 
2217       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2218       if (F->getAttributes().hasRetAttr(Attribute::SExt))
2219         ExtendKind = ISD::SIGN_EXTEND;
2220       else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2221         ExtendKind = ISD::ZERO_EXTEND;
2222 
2223       LLVMContext &Context = F->getContext();
2224       bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2225 
2226       for (unsigned j = 0; j != NumValues; ++j) {
2227         EVT VT = ValueVTs[j];
2228 
2229         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2230           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
2231 
2232         CallingConv::ID CC = F->getCallingConv();
2233 
2234         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
2235         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
2236         SmallVector<SDValue, 4> Parts(NumParts);
2237         getCopyToParts(DAG, getCurSDLoc(),
2238                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
2239                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
2240 
2241         // 'inreg' on function refers to return value
2242         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2243         if (RetInReg)
2244           Flags.setInReg();
2245 
2246         if (I.getOperand(0)->getType()->isPointerTy()) {
2247           Flags.setPointer();
2248           Flags.setPointerAddrSpace(
2249               cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2250         }
2251 
2252         if (NeedsRegBlock) {
2253           Flags.setInConsecutiveRegs();
2254           if (j == NumValues - 1)
2255             Flags.setInConsecutiveRegsLast();
2256         }
2257 
2258         // Propagate extension type if any
2259         if (ExtendKind == ISD::SIGN_EXTEND)
2260           Flags.setSExt();
2261         else if (ExtendKind == ISD::ZERO_EXTEND)
2262           Flags.setZExt();
2263 
2264         for (unsigned i = 0; i < NumParts; ++i) {
2265           Outs.push_back(ISD::OutputArg(Flags,
2266                                         Parts[i].getValueType().getSimpleVT(),
2267                                         VT, /*isfixed=*/true, 0, 0));
2268           OutVals.push_back(Parts[i]);
2269         }
2270       }
2271     }
2272   }
2273 
2274   // Push in swifterror virtual register as the last element of Outs. This makes
2275   // sure swifterror virtual register will be returned in the swifterror
2276   // physical register.
2277   const Function *F = I.getParent()->getParent();
2278   if (TLI.supportSwiftError() &&
2279       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2280     assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2281     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2282     Flags.setSwiftError();
2283     Outs.push_back(ISD::OutputArg(
2284         Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
2285         /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
2286     // Create SDNode for the swifterror virtual register.
2287     OutVals.push_back(
2288         DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2289                             &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2290                         EVT(TLI.getPointerTy(DL))));
2291   }
2292 
2293   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2294   CallingConv::ID CallConv =
2295     DAG.getMachineFunction().getFunction().getCallingConv();
2296   Chain = DAG.getTargetLoweringInfo().LowerReturn(
2297       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2298 
2299   // Verify that the target's LowerReturn behaved as expected.
2300   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2301          "LowerReturn didn't return a valid chain!");
2302 
2303   // Update the DAG with the new chain value resulting from return lowering.
2304   DAG.setRoot(Chain);
2305 }
2306 
2307 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
2308 /// created for it, emit nodes to copy the value into the virtual
2309 /// registers.
2310 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
2311   // Skip empty types
2312   if (V->getType()->isEmptyTy())
2313     return;
2314 
2315   DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
2316   if (VMI != FuncInfo.ValueMap.end()) {
2317     assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2318            "Unused value assigned virtual registers!");
2319     CopyValueToVirtualRegister(V, VMI->second);
2320   }
2321 }
2322 
2323 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
2324 /// the current basic block, add it to ValueMap now so that we'll get a
2325 /// CopyTo/FromReg.
2326 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
2327   // No need to export constants.
2328   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2329 
2330   // Already exported?
2331   if (FuncInfo.isExportedInst(V)) return;
2332 
2333   Register Reg = FuncInfo.InitializeRegForValue(V);
2334   CopyValueToVirtualRegister(V, Reg);
2335 }
2336 
2337 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
2338                                                      const BasicBlock *FromBB) {
2339   // The operands of the setcc have to be in this block.  We don't know
2340   // how to export them from some other block.
2341   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2342     // Can export from current BB.
2343     if (VI->getParent() == FromBB)
2344       return true;
2345 
2346     // Is already exported, noop.
2347     return FuncInfo.isExportedInst(V);
2348   }
2349 
2350   // If this is an argument, we can export it if the BB is the entry block or
2351   // if it is already exported.
2352   if (isa<Argument>(V)) {
2353     if (FromBB->isEntryBlock())
2354       return true;
2355 
2356     // Otherwise, can only export this if it is already exported.
2357     return FuncInfo.isExportedInst(V);
2358   }
2359 
2360   // Otherwise, constants can always be exported.
2361   return true;
2362 }
2363 
2364 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2365 BranchProbability
2366 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2367                                         const MachineBasicBlock *Dst) const {
2368   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2369   const BasicBlock *SrcBB = Src->getBasicBlock();
2370   const BasicBlock *DstBB = Dst->getBasicBlock();
2371   if (!BPI) {
2372     // If BPI is not available, set the default probability as 1 / N, where N is
2373     // the number of successors.
2374     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2375     return BranchProbability(1, SuccSize);
2376   }
2377   return BPI->getEdgeProbability(SrcBB, DstBB);
2378 }
2379 
2380 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2381                                                MachineBasicBlock *Dst,
2382                                                BranchProbability Prob) {
2383   if (!FuncInfo.BPI)
2384     Src->addSuccessorWithoutProb(Dst);
2385   else {
2386     if (Prob.isUnknown())
2387       Prob = getEdgeProbability(Src, Dst);
2388     Src->addSuccessor(Dst, Prob);
2389   }
2390 }
2391 
2392 static bool InBlock(const Value *V, const BasicBlock *BB) {
2393   if (const Instruction *I = dyn_cast<Instruction>(V))
2394     return I->getParent() == BB;
2395   return true;
2396 }
2397 
2398 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2399 /// This function emits a branch and is used at the leaves of an OR or an
2400 /// AND operator tree.
2401 void
2402 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2403                                                   MachineBasicBlock *TBB,
2404                                                   MachineBasicBlock *FBB,
2405                                                   MachineBasicBlock *CurBB,
2406                                                   MachineBasicBlock *SwitchBB,
2407                                                   BranchProbability TProb,
2408                                                   BranchProbability FProb,
2409                                                   bool InvertCond) {
2410   const BasicBlock *BB = CurBB->getBasicBlock();
2411 
2412   // If the leaf of the tree is a comparison, merge the condition into
2413   // the caseblock.
2414   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2415     // The operands of the cmp have to be in this block.  We don't know
2416     // how to export them from some other block.  If this is the first block
2417     // of the sequence, no exporting is needed.
2418     if (CurBB == SwitchBB ||
2419         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2420          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2421       ISD::CondCode Condition;
2422       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2423         ICmpInst::Predicate Pred =
2424             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2425         Condition = getICmpCondCode(Pred);
2426       } else {
2427         const FCmpInst *FC = cast<FCmpInst>(Cond);
2428         FCmpInst::Predicate Pred =
2429             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2430         Condition = getFCmpCondCode(Pred);
2431         if (TM.Options.NoNaNsFPMath)
2432           Condition = getFCmpCodeWithoutNaN(Condition);
2433       }
2434 
2435       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2436                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2437       SL->SwitchCases.push_back(CB);
2438       return;
2439     }
2440   }
2441 
2442   // Create a CaseBlock record representing this branch.
2443   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2444   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2445                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2446   SL->SwitchCases.push_back(CB);
2447 }
2448 
2449 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2450                                                MachineBasicBlock *TBB,
2451                                                MachineBasicBlock *FBB,
2452                                                MachineBasicBlock *CurBB,
2453                                                MachineBasicBlock *SwitchBB,
2454                                                Instruction::BinaryOps Opc,
2455                                                BranchProbability TProb,
2456                                                BranchProbability FProb,
2457                                                bool InvertCond) {
2458   // Skip over not part of the tree and remember to invert op and operands at
2459   // next level.
2460   Value *NotCond;
2461   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2462       InBlock(NotCond, CurBB->getBasicBlock())) {
2463     FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2464                          !InvertCond);
2465     return;
2466   }
2467 
2468   const Instruction *BOp = dyn_cast<Instruction>(Cond);
2469   const Value *BOpOp0, *BOpOp1;
2470   // Compute the effective opcode for Cond, taking into account whether it needs
2471   // to be inverted, e.g.
2472   //   and (not (or A, B)), C
2473   // gets lowered as
2474   //   and (and (not A, not B), C)
2475   Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2476   if (BOp) {
2477     BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2478                ? Instruction::And
2479                : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2480                       ? Instruction::Or
2481                       : (Instruction::BinaryOps)0);
2482     if (InvertCond) {
2483       if (BOpc == Instruction::And)
2484         BOpc = Instruction::Or;
2485       else if (BOpc == Instruction::Or)
2486         BOpc = Instruction::And;
2487     }
2488   }
2489 
2490   // If this node is not part of the or/and tree, emit it as a branch.
2491   // Note that all nodes in the tree should have same opcode.
2492   bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2493   if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2494       !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2495       !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2496     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2497                                  TProb, FProb, InvertCond);
2498     return;
2499   }
2500 
2501   //  Create TmpBB after CurBB.
2502   MachineFunction::iterator BBI(CurBB);
2503   MachineFunction &MF = DAG.getMachineFunction();
2504   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2505   CurBB->getParent()->insert(++BBI, TmpBB);
2506 
2507   if (Opc == Instruction::Or) {
2508     // Codegen X | Y as:
2509     // BB1:
2510     //   jmp_if_X TBB
2511     //   jmp TmpBB
2512     // TmpBB:
2513     //   jmp_if_Y TBB
2514     //   jmp FBB
2515     //
2516 
2517     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2518     // The requirement is that
2519     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2520     //     = TrueProb for original BB.
2521     // Assuming the original probabilities are A and B, one choice is to set
2522     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2523     // A/(1+B) and 2B/(1+B). This choice assumes that
2524     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2525     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2526     // TmpBB, but the math is more complicated.
2527 
2528     auto NewTrueProb = TProb / 2;
2529     auto NewFalseProb = TProb / 2 + FProb;
2530     // Emit the LHS condition.
2531     FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2532                          NewFalseProb, InvertCond);
2533 
2534     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2535     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2536     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2537     // Emit the RHS condition into TmpBB.
2538     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2539                          Probs[1], InvertCond);
2540   } else {
2541     assert(Opc == Instruction::And && "Unknown merge op!");
2542     // Codegen X & Y as:
2543     // BB1:
2544     //   jmp_if_X TmpBB
2545     //   jmp FBB
2546     // TmpBB:
2547     //   jmp_if_Y TBB
2548     //   jmp FBB
2549     //
2550     //  This requires creation of TmpBB after CurBB.
2551 
2552     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2553     // The requirement is that
2554     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2555     //     = FalseProb for original BB.
2556     // Assuming the original probabilities are A and B, one choice is to set
2557     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2558     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2559     // TrueProb for BB1 * FalseProb for TmpBB.
2560 
2561     auto NewTrueProb = TProb + FProb / 2;
2562     auto NewFalseProb = FProb / 2;
2563     // Emit the LHS condition.
2564     FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2565                          NewFalseProb, InvertCond);
2566 
2567     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2568     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2569     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2570     // Emit the RHS condition into TmpBB.
2571     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2572                          Probs[1], InvertCond);
2573   }
2574 }
2575 
2576 /// If the set of cases should be emitted as a series of branches, return true.
2577 /// If we should emit this as a bunch of and/or'd together conditions, return
2578 /// false.
2579 bool
2580 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2581   if (Cases.size() != 2) return true;
2582 
2583   // If this is two comparisons of the same values or'd or and'd together, they
2584   // will get folded into a single comparison, so don't emit two blocks.
2585   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2586        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2587       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2588        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2589     return false;
2590   }
2591 
2592   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2593   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2594   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2595       Cases[0].CC == Cases[1].CC &&
2596       isa<Constant>(Cases[0].CmpRHS) &&
2597       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2598     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2599       return false;
2600     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2601       return false;
2602   }
2603 
2604   return true;
2605 }
2606 
2607 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2608   MachineBasicBlock *BrMBB = FuncInfo.MBB;
2609 
2610   // Update machine-CFG edges.
2611   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2612 
2613   if (I.isUnconditional()) {
2614     // Update machine-CFG edges.
2615     BrMBB->addSuccessor(Succ0MBB);
2616 
2617     // If this is not a fall-through branch or optimizations are switched off,
2618     // emit the branch.
2619     if (Succ0MBB != NextBlock(BrMBB) ||
2620         TM.getOptLevel() == CodeGenOptLevel::None) {
2621       auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2622                             getControlRoot(), DAG.getBasicBlock(Succ0MBB));
2623       setValue(&I, Br);
2624       DAG.setRoot(Br);
2625     }
2626 
2627     return;
2628   }
2629 
2630   // If this condition is one of the special cases we handle, do special stuff
2631   // now.
2632   const Value *CondVal = I.getCondition();
2633   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2634 
2635   // If this is a series of conditions that are or'd or and'd together, emit
2636   // this as a sequence of branches instead of setcc's with and/or operations.
2637   // As long as jumps are not expensive (exceptions for multi-use logic ops,
2638   // unpredictable branches, and vector extracts because those jumps are likely
2639   // expensive for any target), this should improve performance.
2640   // For example, instead of something like:
2641   //     cmp A, B
2642   //     C = seteq
2643   //     cmp D, E
2644   //     F = setle
2645   //     or C, F
2646   //     jnz foo
2647   // Emit:
2648   //     cmp A, B
2649   //     je foo
2650   //     cmp D, E
2651   //     jle foo
2652   const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2653   if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2654       BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
2655     Value *Vec;
2656     const Value *BOp0, *BOp1;
2657     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2658     if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2659       Opcode = Instruction::And;
2660     else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2661       Opcode = Instruction::Or;
2662 
2663     if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2664                     match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
2665       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2666                            getEdgeProbability(BrMBB, Succ0MBB),
2667                            getEdgeProbability(BrMBB, Succ1MBB),
2668                            /*InvertCond=*/false);
2669       // If the compares in later blocks need to use values not currently
2670       // exported from this block, export them now.  This block should always
2671       // be the first entry.
2672       assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2673 
2674       // Allow some cases to be rejected.
2675       if (ShouldEmitAsBranches(SL->SwitchCases)) {
2676         for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2677           ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2678           ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2679         }
2680 
2681         // Emit the branch for this block.
2682         visitSwitchCase(SL->SwitchCases[0], BrMBB);
2683         SL->SwitchCases.erase(SL->SwitchCases.begin());
2684         return;
2685       }
2686 
2687       // Okay, we decided not to do this, remove any inserted MBB's and clear
2688       // SwitchCases.
2689       for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2690         FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2691 
2692       SL->SwitchCases.clear();
2693     }
2694   }
2695 
2696   // Create a CaseBlock record representing this branch.
2697   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2698                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2699 
2700   // Use visitSwitchCase to actually insert the fast branch sequence for this
2701   // cond branch.
2702   visitSwitchCase(CB, BrMBB);
2703 }
2704 
2705 /// visitSwitchCase - Emits the necessary code to represent a single node in
2706 /// the binary search tree resulting from lowering a switch instruction.
2707 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2708                                           MachineBasicBlock *SwitchBB) {
2709   SDValue Cond;
2710   SDValue CondLHS = getValue(CB.CmpLHS);
2711   SDLoc dl = CB.DL;
2712 
2713   if (CB.CC == ISD::SETTRUE) {
2714     // Branch or fall through to TrueBB.
2715     addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2716     SwitchBB->normalizeSuccProbs();
2717     if (CB.TrueBB != NextBlock(SwitchBB)) {
2718       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2719                               DAG.getBasicBlock(CB.TrueBB)));
2720     }
2721     return;
2722   }
2723 
2724   auto &TLI = DAG.getTargetLoweringInfo();
2725   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2726 
2727   // Build the setcc now.
2728   if (!CB.CmpMHS) {
2729     // Fold "(X == true)" to X and "(X == false)" to !X to
2730     // handle common cases produced by branch lowering.
2731     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2732         CB.CC == ISD::SETEQ)
2733       Cond = CondLHS;
2734     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2735              CB.CC == ISD::SETEQ) {
2736       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2737       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2738     } else {
2739       SDValue CondRHS = getValue(CB.CmpRHS);
2740 
2741       // If a pointer's DAG type is larger than its memory type then the DAG
2742       // values are zero-extended. This breaks signed comparisons so truncate
2743       // back to the underlying type before doing the compare.
2744       if (CondLHS.getValueType() != MemVT) {
2745         CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2746         CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2747       }
2748       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2749     }
2750   } else {
2751     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2752 
2753     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2754     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2755 
2756     SDValue CmpOp = getValue(CB.CmpMHS);
2757     EVT VT = CmpOp.getValueType();
2758 
2759     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2760       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2761                           ISD::SETLE);
2762     } else {
2763       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2764                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2765       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2766                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2767     }
2768   }
2769 
2770   // Update successor info
2771   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2772   // TrueBB and FalseBB are always different unless the incoming IR is
2773   // degenerate. This only happens when running llc on weird IR.
2774   if (CB.TrueBB != CB.FalseBB)
2775     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2776   SwitchBB->normalizeSuccProbs();
2777 
2778   // If the lhs block is the next block, invert the condition so that we can
2779   // fall through to the lhs instead of the rhs block.
2780   if (CB.TrueBB == NextBlock(SwitchBB)) {
2781     std::swap(CB.TrueBB, CB.FalseBB);
2782     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2783     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2784   }
2785 
2786   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2787                                MVT::Other, getControlRoot(), Cond,
2788                                DAG.getBasicBlock(CB.TrueBB));
2789 
2790   setValue(CurInst, BrCond);
2791 
2792   // Insert the false branch. Do this even if it's a fall through branch,
2793   // this makes it easier to do DAG optimizations which require inverting
2794   // the branch condition.
2795   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2796                        DAG.getBasicBlock(CB.FalseBB));
2797 
2798   DAG.setRoot(BrCond);
2799 }
2800 
2801 /// visitJumpTable - Emit JumpTable node in the current MBB
2802 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2803   // Emit the code for the jump table
2804   assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2805   assert(JT.Reg != -1U && "Should lower JT Header first!");
2806   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2807   SDValue Index = DAG.getCopyFromReg(getControlRoot(), *JT.SL, JT.Reg, PTy);
2808   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2809   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
2810                                     Index.getValue(1), Table, Index);
2811   DAG.setRoot(BrJumpTable);
2812 }
2813 
2814 /// visitJumpTableHeader - This function emits necessary code to produce index
2815 /// in the JumpTable from switch case.
2816 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2817                                                JumpTableHeader &JTH,
2818                                                MachineBasicBlock *SwitchBB) {
2819   assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2820   const SDLoc &dl = *JT.SL;
2821 
2822   // Subtract the lowest switch case value from the value being switched on.
2823   SDValue SwitchOp = getValue(JTH.SValue);
2824   EVT VT = SwitchOp.getValueType();
2825   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2826                             DAG.getConstant(JTH.First, dl, VT));
2827 
2828   // The SDNode we just created, which holds the value being switched on minus
2829   // the smallest case value, needs to be copied to a virtual register so it
2830   // can be used as an index into the jump table in a subsequent basic block.
2831   // This value may be smaller or larger than the target's pointer type, and
2832   // therefore require extension or truncating.
2833   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2834   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2835 
2836   unsigned JumpTableReg =
2837       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2838   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2839                                     JumpTableReg, SwitchOp);
2840   JT.Reg = JumpTableReg;
2841 
2842   if (!JTH.FallthroughUnreachable) {
2843     // Emit the range check for the jump table, and branch to the default block
2844     // for the switch statement if the value being switched on exceeds the
2845     // largest case in the switch.
2846     SDValue CMP = DAG.getSetCC(
2847         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2848                                    Sub.getValueType()),
2849         Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2850 
2851     SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2852                                  MVT::Other, CopyTo, CMP,
2853                                  DAG.getBasicBlock(JT.Default));
2854 
2855     // Avoid emitting unnecessary branches to the next block.
2856     if (JT.MBB != NextBlock(SwitchBB))
2857       BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2858                            DAG.getBasicBlock(JT.MBB));
2859 
2860     DAG.setRoot(BrCond);
2861   } else {
2862     // Avoid emitting unnecessary branches to the next block.
2863     if (JT.MBB != NextBlock(SwitchBB))
2864       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2865                               DAG.getBasicBlock(JT.MBB)));
2866     else
2867       DAG.setRoot(CopyTo);
2868   }
2869 }
2870 
2871 /// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
2872 /// variable if there exists one.
2873 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2874                                  SDValue &Chain) {
2875   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2876   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2877   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2878   MachineFunction &MF = DAG.getMachineFunction();
2879   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2880   MachineSDNode *Node =
2881       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2882   if (Global) {
2883     MachinePointerInfo MPInfo(Global);
2884     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2885                  MachineMemOperand::MODereferenceable;
2886     MachineMemOperand *MemRef = MF.getMachineMemOperand(
2887         MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
2888     DAG.setNodeMemRefs(Node, {MemRef});
2889   }
2890   if (PtrTy != PtrMemTy)
2891     return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2892   return SDValue(Node, 0);
2893 }
2894 
2895 /// Codegen a new tail for a stack protector check ParentMBB which has had its
2896 /// tail spliced into a stack protector check success bb.
2897 ///
2898 /// For a high level explanation of how this fits into the stack protector
2899 /// generation see the comment on the declaration of class
2900 /// StackProtectorDescriptor.
2901 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2902                                                   MachineBasicBlock *ParentBB) {
2903 
2904   // First create the loads to the guard/stack slot for the comparison.
2905   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2906   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2907   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2908 
2909   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2910   int FI = MFI.getStackProtectorIndex();
2911 
2912   SDValue Guard;
2913   SDLoc dl = getCurSDLoc();
2914   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2915   const Module &M = *ParentBB->getParent()->getFunction().getParent();
2916   Align Align =
2917       DAG.getDataLayout().getPrefTypeAlign(PointerType::get(M.getContext(), 0));
2918 
2919   // Generate code to load the content of the guard slot.
2920   SDValue GuardVal = DAG.getLoad(
2921       PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2922       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2923       MachineMemOperand::MOVolatile);
2924 
2925   if (TLI.useStackGuardXorFP())
2926     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2927 
2928   // Retrieve guard check function, nullptr if instrumentation is inlined.
2929   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2930     // The target provides a guard check function to validate the guard value.
2931     // Generate a call to that function with the content of the guard slot as
2932     // argument.
2933     FunctionType *FnTy = GuardCheckFn->getFunctionType();
2934     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2935 
2936     TargetLowering::ArgListTy Args;
2937     TargetLowering::ArgListEntry Entry;
2938     Entry.Node = GuardVal;
2939     Entry.Ty = FnTy->getParamType(0);
2940     if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
2941       Entry.IsInReg = true;
2942     Args.push_back(Entry);
2943 
2944     TargetLowering::CallLoweringInfo CLI(DAG);
2945     CLI.setDebugLoc(getCurSDLoc())
2946         .setChain(DAG.getEntryNode())
2947         .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2948                    getValue(GuardCheckFn), std::move(Args));
2949 
2950     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2951     DAG.setRoot(Result.second);
2952     return;
2953   }
2954 
2955   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2956   // Otherwise, emit a volatile load to retrieve the stack guard value.
2957   SDValue Chain = DAG.getEntryNode();
2958   if (TLI.useLoadStackGuardNode()) {
2959     Guard = getLoadStackGuard(DAG, dl, Chain);
2960   } else {
2961     const Value *IRGuard = TLI.getSDagStackGuard(M);
2962     SDValue GuardPtr = getValue(IRGuard);
2963 
2964     Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2965                         MachinePointerInfo(IRGuard, 0), Align,
2966                         MachineMemOperand::MOVolatile);
2967   }
2968 
2969   // Perform the comparison via a getsetcc.
2970   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2971                                                         *DAG.getContext(),
2972                                                         Guard.getValueType()),
2973                              Guard, GuardVal, ISD::SETNE);
2974 
2975   // If the guard/stackslot do not equal, branch to failure MBB.
2976   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2977                                MVT::Other, GuardVal.getOperand(0),
2978                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2979   // Otherwise branch to success MBB.
2980   SDValue Br = DAG.getNode(ISD::BR, dl,
2981                            MVT::Other, BrCond,
2982                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2983 
2984   DAG.setRoot(Br);
2985 }
2986 
2987 /// Codegen the failure basic block for a stack protector check.
2988 ///
2989 /// A failure stack protector machine basic block consists simply of a call to
2990 /// __stack_chk_fail().
2991 ///
2992 /// For a high level explanation of how this fits into the stack protector
2993 /// generation see the comment on the declaration of class
2994 /// StackProtectorDescriptor.
2995 void
2996 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2997   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2998   TargetLowering::MakeLibCallOptions CallOptions;
2999   CallOptions.setDiscardResult(true);
3000   SDValue Chain =
3001       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3002                       std::nullopt, CallOptions, getCurSDLoc())
3003           .second;
3004   // On PS4/PS5, the "return address" must still be within the calling
3005   // function, even if it's at the very end, so emit an explicit TRAP here.
3006   // Passing 'true' for doesNotReturn above won't generate the trap for us.
3007   if (TM.getTargetTriple().isPS())
3008     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
3009   // WebAssembly needs an unreachable instruction after a non-returning call,
3010   // because the function return type can be different from __stack_chk_fail's
3011   // return type (void).
3012   if (TM.getTargetTriple().isWasm())
3013     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
3014 
3015   DAG.setRoot(Chain);
3016 }
3017 
3018 /// visitBitTestHeader - This function emits necessary code to produce value
3019 /// suitable for "bit tests"
3020 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
3021                                              MachineBasicBlock *SwitchBB) {
3022   SDLoc dl = getCurSDLoc();
3023 
3024   // Subtract the minimum value.
3025   SDValue SwitchOp = getValue(B.SValue);
3026   EVT VT = SwitchOp.getValueType();
3027   SDValue RangeSub =
3028       DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
3029 
3030   // Determine the type of the test operands.
3031   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3032   bool UsePtrType = false;
3033   if (!TLI.isTypeLegal(VT)) {
3034     UsePtrType = true;
3035   } else {
3036     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
3037       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
3038         // Switch table case range are encoded into series of masks.
3039         // Just use pointer type, it's guaranteed to fit.
3040         UsePtrType = true;
3041         break;
3042       }
3043   }
3044   SDValue Sub = RangeSub;
3045   if (UsePtrType) {
3046     VT = TLI.getPointerTy(DAG.getDataLayout());
3047     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
3048   }
3049 
3050   B.RegVT = VT.getSimpleVT();
3051   B.Reg = FuncInfo.CreateReg(B.RegVT);
3052   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
3053 
3054   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
3055 
3056   if (!B.FallthroughUnreachable)
3057     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
3058   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
3059   SwitchBB->normalizeSuccProbs();
3060 
3061   SDValue Root = CopyTo;
3062   if (!B.FallthroughUnreachable) {
3063     // Conditional branch to the default block.
3064     SDValue RangeCmp = DAG.getSetCC(dl,
3065         TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
3066                                RangeSub.getValueType()),
3067         RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
3068         ISD::SETUGT);
3069 
3070     Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3071                        DAG.getBasicBlock(B.Default));
3072   }
3073 
3074   // Avoid emitting unnecessary branches to the next block.
3075   if (MBB != NextBlock(SwitchBB))
3076     Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
3077 
3078   DAG.setRoot(Root);
3079 }
3080 
3081 /// visitBitTestCase - this function produces one "bit test"
3082 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
3083                                            MachineBasicBlock* NextMBB,
3084                                            BranchProbability BranchProbToNext,
3085                                            unsigned Reg,
3086                                            BitTestCase &B,
3087                                            MachineBasicBlock *SwitchBB) {
3088   SDLoc dl = getCurSDLoc();
3089   MVT VT = BB.RegVT;
3090   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
3091   SDValue Cmp;
3092   unsigned PopCount = llvm::popcount(B.Mask);
3093   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3094   if (PopCount == 1) {
3095     // Testing for a single bit; just compare the shift count with what it
3096     // would need to be to shift a 1 bit in that position.
3097     Cmp = DAG.getSetCC(
3098         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3099         ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
3100         ISD::SETEQ);
3101   } else if (PopCount == BB.Range) {
3102     // There is only one zero bit in the range, test for it directly.
3103     Cmp = DAG.getSetCC(
3104         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3105         ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
3106   } else {
3107     // Make desired shift
3108     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
3109                                     DAG.getConstant(1, dl, VT), ShiftOp);
3110 
3111     // Emit bit tests and jumps
3112     SDValue AndOp = DAG.getNode(ISD::AND, dl,
3113                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
3114     Cmp = DAG.getSetCC(
3115         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3116         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
3117   }
3118 
3119   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
3120   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
3121   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
3122   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3123   // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
3124   // one as they are relative probabilities (and thus work more like weights),
3125   // and hence we need to normalize them to let the sum of them become one.
3126   SwitchBB->normalizeSuccProbs();
3127 
3128   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
3129                               MVT::Other, getControlRoot(),
3130                               Cmp, DAG.getBasicBlock(B.TargetBB));
3131 
3132   // Avoid emitting unnecessary branches to the next block.
3133   if (NextMBB != NextBlock(SwitchBB))
3134     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3135                         DAG.getBasicBlock(NextMBB));
3136 
3137   DAG.setRoot(BrAnd);
3138 }
3139 
3140 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
3141   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
3142 
3143   // Retrieve successors. Look through artificial IR level blocks like
3144   // catchswitch for successors.
3145   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
3146   const BasicBlock *EHPadBB = I.getSuccessor(1);
3147   MachineBasicBlock *EHPadMBB = FuncInfo.MBBMap[EHPadBB];
3148 
3149   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3150   // have to do anything here to lower funclet bundles.
3151   assert(!I.hasOperandBundlesOtherThan(
3152              {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3153               LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3154               LLVMContext::OB_cfguardtarget,
3155               LLVMContext::OB_clang_arc_attachedcall}) &&
3156          "Cannot lower invokes with arbitrary operand bundles yet!");
3157 
3158   const Value *Callee(I.getCalledOperand());
3159   const Function *Fn = dyn_cast<Function>(Callee);
3160   if (isa<InlineAsm>(Callee))
3161     visitInlineAsm(I, EHPadBB);
3162   else if (Fn && Fn->isIntrinsic()) {
3163     switch (Fn->getIntrinsicID()) {
3164     default:
3165       llvm_unreachable("Cannot invoke this intrinsic");
3166     case Intrinsic::donothing:
3167       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
3168     case Intrinsic::seh_try_begin:
3169     case Intrinsic::seh_scope_begin:
3170     case Intrinsic::seh_try_end:
3171     case Intrinsic::seh_scope_end:
3172       if (EHPadMBB)
3173           // a block referenced by EH table
3174           // so dtor-funclet not removed by opts
3175           EHPadMBB->setMachineBlockAddressTaken();
3176       break;
3177     case Intrinsic::experimental_patchpoint_void:
3178     case Intrinsic::experimental_patchpoint_i64:
3179       visitPatchpoint(I, EHPadBB);
3180       break;
3181     case Intrinsic::experimental_gc_statepoint:
3182       LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
3183       break;
3184     case Intrinsic::wasm_rethrow: {
3185       // This is usually done in visitTargetIntrinsic, but this intrinsic is
3186       // special because it can be invoked, so we manually lower it to a DAG
3187       // node here.
3188       SmallVector<SDValue, 8> Ops;
3189       Ops.push_back(getRoot()); // inchain
3190       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3191       Ops.push_back(
3192           DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
3193                                 TLI.getPointerTy(DAG.getDataLayout())));
3194       SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
3195       DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
3196       break;
3197     }
3198     }
3199   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
3200     // Currently we do not lower any intrinsic calls with deopt operand bundles.
3201     // Eventually we will support lowering the @llvm.experimental.deoptimize
3202     // intrinsic, and right now there are no plans to support other intrinsics
3203     // with deopt state.
3204     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
3205   } else {
3206     LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
3207   }
3208 
3209   // If the value of the invoke is used outside of its defining block, make it
3210   // available as a virtual register.
3211   // We already took care of the exported value for the statepoint instruction
3212   // during call to the LowerStatepoint.
3213   if (!isa<GCStatepointInst>(I)) {
3214     CopyToExportRegsIfNeeded(&I);
3215   }
3216 
3217   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3218   BranchProbabilityInfo *BPI = FuncInfo.BPI;
3219   BranchProbability EHPadBBProb =
3220       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3221           : BranchProbability::getZero();
3222   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
3223 
3224   // Update successor info.
3225   addSuccessorWithProb(InvokeMBB, Return);
3226   for (auto &UnwindDest : UnwindDests) {
3227     UnwindDest.first->setIsEHPad();
3228     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3229   }
3230   InvokeMBB->normalizeSuccProbs();
3231 
3232   // Drop into normal successor.
3233   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
3234                           DAG.getBasicBlock(Return)));
3235 }
3236 
3237 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
3238   MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
3239 
3240   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3241   // have to do anything here to lower funclet bundles.
3242   assert(!I.hasOperandBundlesOtherThan(
3243              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3244          "Cannot lower callbrs with arbitrary operand bundles yet!");
3245 
3246   assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
3247   visitInlineAsm(I);
3248   CopyToExportRegsIfNeeded(&I);
3249 
3250   // Retrieve successors.
3251   SmallPtrSet<BasicBlock *, 8> Dests;
3252   Dests.insert(I.getDefaultDest());
3253   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
3254 
3255   // Update successor info.
3256   addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
3257   for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
3258     BasicBlock *Dest = I.getIndirectDest(i);
3259     MachineBasicBlock *Target = FuncInfo.MBBMap[Dest];
3260     Target->setIsInlineAsmBrIndirectTarget();
3261     Target->setMachineBlockAddressTaken();
3262     Target->setLabelMustBeEmitted();
3263     // Don't add duplicate machine successors.
3264     if (Dests.insert(Dest).second)
3265       addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
3266   }
3267   CallBrMBB->normalizeSuccProbs();
3268 
3269   // Drop into default successor.
3270   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
3271                           MVT::Other, getControlRoot(),
3272                           DAG.getBasicBlock(Return)));
3273 }
3274 
3275 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
3276   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
3277 }
3278 
3279 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3280   assert(FuncInfo.MBB->isEHPad() &&
3281          "Call to landingpad not in landing pad!");
3282 
3283   // If there aren't registers to copy the values into (e.g., during SjLj
3284   // exceptions), then don't bother to create these DAG nodes.
3285   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3286   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3287   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3288       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3289     return;
3290 
3291   // If landingpad's return type is token type, we don't create DAG nodes
3292   // for its exception pointer and selector value. The extraction of exception
3293   // pointer or selector value from token type landingpads is not currently
3294   // supported.
3295   if (LP.getType()->isTokenTy())
3296     return;
3297 
3298   SmallVector<EVT, 2> ValueVTs;
3299   SDLoc dl = getCurSDLoc();
3300   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3301   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3302 
3303   // Get the two live-in registers as SDValues. The physregs have already been
3304   // copied into virtual registers.
3305   SDValue Ops[2];
3306   if (FuncInfo.ExceptionPointerVirtReg) {
3307     Ops[0] = DAG.getZExtOrTrunc(
3308         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3309                            FuncInfo.ExceptionPointerVirtReg,
3310                            TLI.getPointerTy(DAG.getDataLayout())),
3311         dl, ValueVTs[0]);
3312   } else {
3313     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3314   }
3315   Ops[1] = DAG.getZExtOrTrunc(
3316       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3317                          FuncInfo.ExceptionSelectorVirtReg,
3318                          TLI.getPointerTy(DAG.getDataLayout())),
3319       dl, ValueVTs[1]);
3320 
3321   // Merge into one.
3322   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3323                             DAG.getVTList(ValueVTs), Ops);
3324   setValue(&LP, Res);
3325 }
3326 
3327 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
3328                                            MachineBasicBlock *Last) {
3329   // Update JTCases.
3330   for (JumpTableBlock &JTB : SL->JTCases)
3331     if (JTB.first.HeaderBB == First)
3332       JTB.first.HeaderBB = Last;
3333 
3334   // Update BitTestCases.
3335   for (BitTestBlock &BTB : SL->BitTestCases)
3336     if (BTB.Parent == First)
3337       BTB.Parent = Last;
3338 }
3339 
3340 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3341   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3342 
3343   // Update machine-CFG edges with unique successors.
3344   SmallSet<BasicBlock*, 32> Done;
3345   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3346     BasicBlock *BB = I.getSuccessor(i);
3347     bool Inserted = Done.insert(BB).second;
3348     if (!Inserted)
3349         continue;
3350 
3351     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
3352     addSuccessorWithProb(IndirectBrMBB, Succ);
3353   }
3354   IndirectBrMBB->normalizeSuccProbs();
3355 
3356   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
3357                           MVT::Other, getControlRoot(),
3358                           getValue(I.getAddress())));
3359 }
3360 
3361 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3362   if (!DAG.getTarget().Options.TrapUnreachable)
3363     return;
3364 
3365   // We may be able to ignore unreachable behind a noreturn call.
3366   if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
3367     if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode())) {
3368       if (Call->doesNotReturn())
3369         return;
3370     }
3371   }
3372 
3373   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3374 }
3375 
3376 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3377   SDNodeFlags Flags;
3378   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3379     Flags.copyFMF(*FPOp);
3380 
3381   SDValue Op = getValue(I.getOperand(0));
3382   SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3383                                     Op, Flags);
3384   setValue(&I, UnNodeValue);
3385 }
3386 
3387 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3388   SDNodeFlags Flags;
3389   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3390     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3391     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3392   }
3393   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3394     Flags.setExact(ExactOp->isExact());
3395   if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
3396     Flags.setDisjoint(DisjointOp->isDisjoint());
3397   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3398     Flags.copyFMF(*FPOp);
3399 
3400   SDValue Op1 = getValue(I.getOperand(0));
3401   SDValue Op2 = getValue(I.getOperand(1));
3402   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3403                                      Op1, Op2, Flags);
3404   setValue(&I, BinNodeValue);
3405 }
3406 
3407 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3408   SDValue Op1 = getValue(I.getOperand(0));
3409   SDValue Op2 = getValue(I.getOperand(1));
3410 
3411   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3412       Op1.getValueType(), DAG.getDataLayout());
3413 
3414   // Coerce the shift amount to the right type if we can. This exposes the
3415   // truncate or zext to optimization early.
3416   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3417     assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) &&
3418            "Unexpected shift type");
3419     Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
3420   }
3421 
3422   bool nuw = false;
3423   bool nsw = false;
3424   bool exact = false;
3425 
3426   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3427 
3428     if (const OverflowingBinaryOperator *OFBinOp =
3429             dyn_cast<const OverflowingBinaryOperator>(&I)) {
3430       nuw = OFBinOp->hasNoUnsignedWrap();
3431       nsw = OFBinOp->hasNoSignedWrap();
3432     }
3433     if (const PossiblyExactOperator *ExactOp =
3434             dyn_cast<const PossiblyExactOperator>(&I))
3435       exact = ExactOp->isExact();
3436   }
3437   SDNodeFlags Flags;
3438   Flags.setExact(exact);
3439   Flags.setNoSignedWrap(nsw);
3440   Flags.setNoUnsignedWrap(nuw);
3441   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3442                             Flags);
3443   setValue(&I, Res);
3444 }
3445 
3446 void SelectionDAGBuilder::visitSDiv(const User &I) {
3447   SDValue Op1 = getValue(I.getOperand(0));
3448   SDValue Op2 = getValue(I.getOperand(1));
3449 
3450   SDNodeFlags Flags;
3451   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3452                  cast<PossiblyExactOperator>(&I)->isExact());
3453   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3454                            Op2, Flags));
3455 }
3456 
3457 void SelectionDAGBuilder::visitICmp(const User &I) {
3458   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3459   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3460     predicate = IC->getPredicate();
3461   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3462     predicate = ICmpInst::Predicate(IC->getPredicate());
3463   SDValue Op1 = getValue(I.getOperand(0));
3464   SDValue Op2 = getValue(I.getOperand(1));
3465   ISD::CondCode Opcode = getICmpCondCode(predicate);
3466 
3467   auto &TLI = DAG.getTargetLoweringInfo();
3468   EVT MemVT =
3469       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3470 
3471   // If a pointer's DAG type is larger than its memory type then the DAG values
3472   // are zero-extended. This breaks signed comparisons so truncate back to the
3473   // underlying type before doing the compare.
3474   if (Op1.getValueType() != MemVT) {
3475     Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3476     Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3477   }
3478 
3479   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3480                                                         I.getType());
3481   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3482 }
3483 
3484 void SelectionDAGBuilder::visitFCmp(const User &I) {
3485   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3486   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3487     predicate = FC->getPredicate();
3488   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3489     predicate = FCmpInst::Predicate(FC->getPredicate());
3490   SDValue Op1 = getValue(I.getOperand(0));
3491   SDValue Op2 = getValue(I.getOperand(1));
3492 
3493   ISD::CondCode Condition = getFCmpCondCode(predicate);
3494   auto *FPMO = cast<FPMathOperator>(&I);
3495   if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3496     Condition = getFCmpCodeWithoutNaN(Condition);
3497 
3498   SDNodeFlags Flags;
3499   Flags.copyFMF(*FPMO);
3500   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3501 
3502   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3503                                                         I.getType());
3504   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3505 }
3506 
3507 // Check if the condition of the select has one use or two users that are both
3508 // selects with the same condition.
3509 static bool hasOnlySelectUsers(const Value *Cond) {
3510   return llvm::all_of(Cond->users(), [](const Value *V) {
3511     return isa<SelectInst>(V);
3512   });
3513 }
3514 
3515 void SelectionDAGBuilder::visitSelect(const User &I) {
3516   SmallVector<EVT, 4> ValueVTs;
3517   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3518                   ValueVTs);
3519   unsigned NumValues = ValueVTs.size();
3520   if (NumValues == 0) return;
3521 
3522   SmallVector<SDValue, 4> Values(NumValues);
3523   SDValue Cond     = getValue(I.getOperand(0));
3524   SDValue LHSVal   = getValue(I.getOperand(1));
3525   SDValue RHSVal   = getValue(I.getOperand(2));
3526   SmallVector<SDValue, 1> BaseOps(1, Cond);
3527   ISD::NodeType OpCode =
3528       Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3529 
3530   bool IsUnaryAbs = false;
3531   bool Negate = false;
3532 
3533   SDNodeFlags Flags;
3534   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3535     Flags.copyFMF(*FPOp);
3536 
3537   Flags.setUnpredictable(
3538       cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));
3539 
3540   // Min/max matching is only viable if all output VTs are the same.
3541   if (all_equal(ValueVTs)) {
3542     EVT VT = ValueVTs[0];
3543     LLVMContext &Ctx = *DAG.getContext();
3544     auto &TLI = DAG.getTargetLoweringInfo();
3545 
3546     // We care about the legality of the operation after it has been type
3547     // legalized.
3548     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3549       VT = TLI.getTypeToTransformTo(Ctx, VT);
3550 
3551     // If the vselect is legal, assume we want to leave this as a vector setcc +
3552     // vselect. Otherwise, if this is going to be scalarized, we want to see if
3553     // min/max is legal on the scalar type.
3554     bool UseScalarMinMax = VT.isVector() &&
3555       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3556 
3557     // ValueTracking's select pattern matching does not account for -0.0,
3558     // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that
3559     // -0.0 is less than +0.0.
3560     Value *LHS, *RHS;
3561     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3562     ISD::NodeType Opc = ISD::DELETED_NODE;
3563     switch (SPR.Flavor) {
3564     case SPF_UMAX:    Opc = ISD::UMAX; break;
3565     case SPF_UMIN:    Opc = ISD::UMIN; break;
3566     case SPF_SMAX:    Opc = ISD::SMAX; break;
3567     case SPF_SMIN:    Opc = ISD::SMIN; break;
3568     case SPF_FMINNUM:
3569       switch (SPR.NaNBehavior) {
3570       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3571       case SPNB_RETURNS_NAN: break;
3572       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3573       case SPNB_RETURNS_ANY:
3574         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT) ||
3575             (UseScalarMinMax &&
3576              TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType())))
3577           Opc = ISD::FMINNUM;
3578         break;
3579       }
3580       break;
3581     case SPF_FMAXNUM:
3582       switch (SPR.NaNBehavior) {
3583       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3584       case SPNB_RETURNS_NAN: break;
3585       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3586       case SPNB_RETURNS_ANY:
3587         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT) ||
3588             (UseScalarMinMax &&
3589              TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType())))
3590           Opc = ISD::FMAXNUM;
3591         break;
3592       }
3593       break;
3594     case SPF_NABS:
3595       Negate = true;
3596       [[fallthrough]];
3597     case SPF_ABS:
3598       IsUnaryAbs = true;
3599       Opc = ISD::ABS;
3600       break;
3601     default: break;
3602     }
3603 
3604     if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3605         (TLI.isOperationLegalOrCustomOrPromote(Opc, VT) ||
3606          (UseScalarMinMax &&
3607           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3608         // If the underlying comparison instruction is used by any other
3609         // instruction, the consumed instructions won't be destroyed, so it is
3610         // not profitable to convert to a min/max.
3611         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3612       OpCode = Opc;
3613       LHSVal = getValue(LHS);
3614       RHSVal = getValue(RHS);
3615       BaseOps.clear();
3616     }
3617 
3618     if (IsUnaryAbs) {
3619       OpCode = Opc;
3620       LHSVal = getValue(LHS);
3621       BaseOps.clear();
3622     }
3623   }
3624 
3625   if (IsUnaryAbs) {
3626     for (unsigned i = 0; i != NumValues; ++i) {
3627       SDLoc dl = getCurSDLoc();
3628       EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3629       Values[i] =
3630           DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3631       if (Negate)
3632         Values[i] = DAG.getNegative(Values[i], dl, VT);
3633     }
3634   } else {
3635     for (unsigned i = 0; i != NumValues; ++i) {
3636       SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3637       Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3638       Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3639       Values[i] = DAG.getNode(
3640           OpCode, getCurSDLoc(),
3641           LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3642     }
3643   }
3644 
3645   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3646                            DAG.getVTList(ValueVTs), Values));
3647 }
3648 
3649 void SelectionDAGBuilder::visitTrunc(const User &I) {
3650   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3651   SDValue N = getValue(I.getOperand(0));
3652   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3653                                                         I.getType());
3654   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3655 }
3656 
3657 void SelectionDAGBuilder::visitZExt(const User &I) {
3658   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3659   // ZExt also can't be a cast to bool for same reason. So, nothing much to do
3660   SDValue N = getValue(I.getOperand(0));
3661   auto &TLI = DAG.getTargetLoweringInfo();
3662   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3663 
3664   SDNodeFlags Flags;
3665   if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3666     Flags.setNonNeg(PNI->hasNonNeg());
3667 
3668   // Eagerly use nonneg information to canonicalize towards sign_extend if
3669   // that is the target's preference.
3670   // TODO: Let the target do this later.
3671   if (Flags.hasNonNeg() &&
3672       TLI.isSExtCheaperThanZExt(N.getValueType(), DestVT)) {
3673     setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3674     return;
3675   }
3676 
3677   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N, Flags));
3678 }
3679 
3680 void SelectionDAGBuilder::visitSExt(const User &I) {
3681   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3682   // SExt also can't be a cast to bool for same reason. So, nothing much to do
3683   SDValue N = getValue(I.getOperand(0));
3684   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3685                                                         I.getType());
3686   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3687 }
3688 
3689 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3690   // FPTrunc is never a no-op cast, no need to check
3691   SDValue N = getValue(I.getOperand(0));
3692   SDLoc dl = getCurSDLoc();
3693   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3694   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3695   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3696                            DAG.getTargetConstant(
3697                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3698 }
3699 
3700 void SelectionDAGBuilder::visitFPExt(const User &I) {
3701   // FPExt is never a no-op cast, no need to check
3702   SDValue N = getValue(I.getOperand(0));
3703   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3704                                                         I.getType());
3705   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3706 }
3707 
3708 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3709   // FPToUI is never a no-op cast, no need to check
3710   SDValue N = getValue(I.getOperand(0));
3711   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3712                                                         I.getType());
3713   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3714 }
3715 
3716 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3717   // FPToSI is never a no-op cast, no need to check
3718   SDValue N = getValue(I.getOperand(0));
3719   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3720                                                         I.getType());
3721   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3722 }
3723 
3724 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3725   // UIToFP is never a no-op cast, no need to check
3726   SDValue N = getValue(I.getOperand(0));
3727   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3728                                                         I.getType());
3729   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3730 }
3731 
3732 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3733   // SIToFP is never a no-op cast, no need to check
3734   SDValue N = getValue(I.getOperand(0));
3735   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3736                                                         I.getType());
3737   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3738 }
3739 
3740 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3741   // What to do depends on the size of the integer and the size of the pointer.
3742   // We can either truncate, zero extend, or no-op, accordingly.
3743   SDValue N = getValue(I.getOperand(0));
3744   auto &TLI = DAG.getTargetLoweringInfo();
3745   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3746                                                         I.getType());
3747   EVT PtrMemVT =
3748       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3749   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3750   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3751   setValue(&I, N);
3752 }
3753 
3754 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3755   // What to do depends on the size of the integer and the size of the pointer.
3756   // We can either truncate, zero extend, or no-op, accordingly.
3757   SDValue N = getValue(I.getOperand(0));
3758   auto &TLI = DAG.getTargetLoweringInfo();
3759   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3760   EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3761   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3762   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3763   setValue(&I, N);
3764 }
3765 
3766 void SelectionDAGBuilder::visitBitCast(const User &I) {
3767   SDValue N = getValue(I.getOperand(0));
3768   SDLoc dl = getCurSDLoc();
3769   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3770                                                         I.getType());
3771 
3772   // BitCast assures us that source and destination are the same size so this is
3773   // either a BITCAST or a no-op.
3774   if (DestVT != N.getValueType())
3775     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3776                              DestVT, N)); // convert types.
3777   // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3778   // might fold any kind of constant expression to an integer constant and that
3779   // is not what we are looking for. Only recognize a bitcast of a genuine
3780   // constant integer as an opaque constant.
3781   else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3782     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3783                                  /*isOpaque*/true));
3784   else
3785     setValue(&I, N);            // noop cast.
3786 }
3787 
3788 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3789   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3790   const Value *SV = I.getOperand(0);
3791   SDValue N = getValue(SV);
3792   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3793 
3794   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3795   unsigned DestAS = I.getType()->getPointerAddressSpace();
3796 
3797   if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
3798     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3799 
3800   setValue(&I, N);
3801 }
3802 
3803 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3804   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3805   SDValue InVec = getValue(I.getOperand(0));
3806   SDValue InVal = getValue(I.getOperand(1));
3807   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3808                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3809   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3810                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3811                            InVec, InVal, InIdx));
3812 }
3813 
3814 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3815   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3816   SDValue InVec = getValue(I.getOperand(0));
3817   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3818                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3819   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3820                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3821                            InVec, InIdx));
3822 }
3823 
3824 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3825   SDValue Src1 = getValue(I.getOperand(0));
3826   SDValue Src2 = getValue(I.getOperand(1));
3827   ArrayRef<int> Mask;
3828   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
3829     Mask = SVI->getShuffleMask();
3830   else
3831     Mask = cast<ConstantExpr>(I).getShuffleMask();
3832   SDLoc DL = getCurSDLoc();
3833   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3834   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3835   EVT SrcVT = Src1.getValueType();
3836 
3837   if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
3838       VT.isScalableVector()) {
3839     // Canonical splat form of first element of first input vector.
3840     SDValue FirstElt =
3841         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
3842                     DAG.getVectorIdxConstant(0, DL));
3843     setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
3844     return;
3845   }
3846 
3847   // For now, we only handle splats for scalable vectors.
3848   // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
3849   // for targets that support a SPLAT_VECTOR for non-scalable vector types.
3850   assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
3851 
3852   unsigned SrcNumElts = SrcVT.getVectorNumElements();
3853   unsigned MaskNumElts = Mask.size();
3854 
3855   if (SrcNumElts == MaskNumElts) {
3856     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3857     return;
3858   }
3859 
3860   // Normalize the shuffle vector since mask and vector length don't match.
3861   if (SrcNumElts < MaskNumElts) {
3862     // Mask is longer than the source vectors. We can use concatenate vector to
3863     // make the mask and vectors lengths match.
3864 
3865     if (MaskNumElts % SrcNumElts == 0) {
3866       // Mask length is a multiple of the source vector length.
3867       // Check if the shuffle is some kind of concatenation of the input
3868       // vectors.
3869       unsigned NumConcat = MaskNumElts / SrcNumElts;
3870       bool IsConcat = true;
3871       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3872       for (unsigned i = 0; i != MaskNumElts; ++i) {
3873         int Idx = Mask[i];
3874         if (Idx < 0)
3875           continue;
3876         // Ensure the indices in each SrcVT sized piece are sequential and that
3877         // the same source is used for the whole piece.
3878         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3879             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3880              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3881           IsConcat = false;
3882           break;
3883         }
3884         // Remember which source this index came from.
3885         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3886       }
3887 
3888       // The shuffle is concatenating multiple vectors together. Just emit
3889       // a CONCAT_VECTORS operation.
3890       if (IsConcat) {
3891         SmallVector<SDValue, 8> ConcatOps;
3892         for (auto Src : ConcatSrcs) {
3893           if (Src < 0)
3894             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3895           else if (Src == 0)
3896             ConcatOps.push_back(Src1);
3897           else
3898             ConcatOps.push_back(Src2);
3899         }
3900         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3901         return;
3902       }
3903     }
3904 
3905     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3906     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3907     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3908                                     PaddedMaskNumElts);
3909 
3910     // Pad both vectors with undefs to make them the same length as the mask.
3911     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3912 
3913     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3914     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3915     MOps1[0] = Src1;
3916     MOps2[0] = Src2;
3917 
3918     Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3919     Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3920 
3921     // Readjust mask for new input vector length.
3922     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3923     for (unsigned i = 0; i != MaskNumElts; ++i) {
3924       int Idx = Mask[i];
3925       if (Idx >= (int)SrcNumElts)
3926         Idx -= SrcNumElts - PaddedMaskNumElts;
3927       MappedOps[i] = Idx;
3928     }
3929 
3930     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3931 
3932     // If the concatenated vector was padded, extract a subvector with the
3933     // correct number of elements.
3934     if (MaskNumElts != PaddedMaskNumElts)
3935       Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3936                            DAG.getVectorIdxConstant(0, DL));
3937 
3938     setValue(&I, Result);
3939     return;
3940   }
3941 
3942   if (SrcNumElts > MaskNumElts) {
3943     // Analyze the access pattern of the vector to see if we can extract
3944     // two subvectors and do the shuffle.
3945     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3946     bool CanExtract = true;
3947     for (int Idx : Mask) {
3948       unsigned Input = 0;
3949       if (Idx < 0)
3950         continue;
3951 
3952       if (Idx >= (int)SrcNumElts) {
3953         Input = 1;
3954         Idx -= SrcNumElts;
3955       }
3956 
3957       // If all the indices come from the same MaskNumElts sized portion of
3958       // the sources we can use extract. Also make sure the extract wouldn't
3959       // extract past the end of the source.
3960       int NewStartIdx = alignDown(Idx, MaskNumElts);
3961       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3962           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3963         CanExtract = false;
3964       // Make sure we always update StartIdx as we use it to track if all
3965       // elements are undef.
3966       StartIdx[Input] = NewStartIdx;
3967     }
3968 
3969     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3970       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3971       return;
3972     }
3973     if (CanExtract) {
3974       // Extract appropriate subvector and generate a vector shuffle
3975       for (unsigned Input = 0; Input < 2; ++Input) {
3976         SDValue &Src = Input == 0 ? Src1 : Src2;
3977         if (StartIdx[Input] < 0)
3978           Src = DAG.getUNDEF(VT);
3979         else {
3980           Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3981                             DAG.getVectorIdxConstant(StartIdx[Input], DL));
3982         }
3983       }
3984 
3985       // Calculate new mask.
3986       SmallVector<int, 8> MappedOps(Mask);
3987       for (int &Idx : MappedOps) {
3988         if (Idx >= (int)SrcNumElts)
3989           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3990         else if (Idx >= 0)
3991           Idx -= StartIdx[0];
3992       }
3993 
3994       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3995       return;
3996     }
3997   }
3998 
3999   // We can't use either concat vectors or extract subvectors so fall back to
4000   // replacing the shuffle with extract and build vector.
4001   // to insert and build vector.
4002   EVT EltVT = VT.getVectorElementType();
4003   SmallVector<SDValue,8> Ops;
4004   for (int Idx : Mask) {
4005     SDValue Res;
4006 
4007     if (Idx < 0) {
4008       Res = DAG.getUNDEF(EltVT);
4009     } else {
4010       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4011       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
4012 
4013       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
4014                         DAG.getVectorIdxConstant(Idx, DL));
4015     }
4016 
4017     Ops.push_back(Res);
4018   }
4019 
4020   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
4021 }
4022 
4023 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
4024   ArrayRef<unsigned> Indices = I.getIndices();
4025   const Value *Op0 = I.getOperand(0);
4026   const Value *Op1 = I.getOperand(1);
4027   Type *AggTy = I.getType();
4028   Type *ValTy = Op1->getType();
4029   bool IntoUndef = isa<UndefValue>(Op0);
4030   bool FromUndef = isa<UndefValue>(Op1);
4031 
4032   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4033 
4034   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4035   SmallVector<EVT, 4> AggValueVTs;
4036   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
4037   SmallVector<EVT, 4> ValValueVTs;
4038   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4039 
4040   unsigned NumAggValues = AggValueVTs.size();
4041   unsigned NumValValues = ValValueVTs.size();
4042   SmallVector<SDValue, 4> Values(NumAggValues);
4043 
4044   // Ignore an insertvalue that produces an empty object
4045   if (!NumAggValues) {
4046     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4047     return;
4048   }
4049 
4050   SDValue Agg = getValue(Op0);
4051   unsigned i = 0;
4052   // Copy the beginning value(s) from the original aggregate.
4053   for (; i != LinearIndex; ++i)
4054     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4055                 SDValue(Agg.getNode(), Agg.getResNo() + i);
4056   // Copy values from the inserted value(s).
4057   if (NumValValues) {
4058     SDValue Val = getValue(Op1);
4059     for (; i != LinearIndex + NumValValues; ++i)
4060       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4061                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
4062   }
4063   // Copy remaining value(s) from the original aggregate.
4064   for (; i != NumAggValues; ++i)
4065     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4066                 SDValue(Agg.getNode(), Agg.getResNo() + i);
4067 
4068   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4069                            DAG.getVTList(AggValueVTs), Values));
4070 }
4071 
4072 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
4073   ArrayRef<unsigned> Indices = I.getIndices();
4074   const Value *Op0 = I.getOperand(0);
4075   Type *AggTy = Op0->getType();
4076   Type *ValTy = I.getType();
4077   bool OutOfUndef = isa<UndefValue>(Op0);
4078 
4079   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4080 
4081   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4082   SmallVector<EVT, 4> ValValueVTs;
4083   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4084 
4085   unsigned NumValValues = ValValueVTs.size();
4086 
4087   // Ignore a extractvalue that produces an empty object
4088   if (!NumValValues) {
4089     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4090     return;
4091   }
4092 
4093   SmallVector<SDValue, 4> Values(NumValValues);
4094 
4095   SDValue Agg = getValue(Op0);
4096   // Copy out the selected value(s).
4097   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4098     Values[i - LinearIndex] =
4099       OutOfUndef ?
4100         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
4101         SDValue(Agg.getNode(), Agg.getResNo() + i);
4102 
4103   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4104                            DAG.getVTList(ValValueVTs), Values));
4105 }
4106 
4107 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
4108   Value *Op0 = I.getOperand(0);
4109   // Note that the pointer operand may be a vector of pointers. Take the scalar
4110   // element which holds a pointer.
4111   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
4112   SDValue N = getValue(Op0);
4113   SDLoc dl = getCurSDLoc();
4114   auto &TLI = DAG.getTargetLoweringInfo();
4115 
4116   // Normalize Vector GEP - all scalar operands should be converted to the
4117   // splat vector.
4118   bool IsVectorGEP = I.getType()->isVectorTy();
4119   ElementCount VectorElementCount =
4120       IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
4121                   : ElementCount::getFixed(0);
4122 
4123   if (IsVectorGEP && !N.getValueType().isVector()) {
4124     LLVMContext &Context = *DAG.getContext();
4125     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
4126     N = DAG.getSplat(VT, dl, N);
4127   }
4128 
4129   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
4130        GTI != E; ++GTI) {
4131     const Value *Idx = GTI.getOperand();
4132     if (StructType *StTy = GTI.getStructTypeOrNull()) {
4133       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
4134       if (Field) {
4135         // N = N + Offset
4136         uint64_t Offset =
4137             DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
4138 
4139         // In an inbounds GEP with an offset that is nonnegative even when
4140         // interpreted as signed, assume there is no unsigned overflow.
4141         SDNodeFlags Flags;
4142         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
4143           Flags.setNoUnsignedWrap(true);
4144 
4145         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
4146                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
4147       }
4148     } else {
4149       // IdxSize is the width of the arithmetic according to IR semantics.
4150       // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4151       // (and fix up the result later).
4152       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4153       MVT IdxTy = MVT::getIntegerVT(IdxSize);
4154       TypeSize ElementSize =
4155           GTI.getSequentialElementStride(DAG.getDataLayout());
4156       // We intentionally mask away the high bits here; ElementSize may not
4157       // fit in IdxTy.
4158       APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
4159       bool ElementScalable = ElementSize.isScalable();
4160 
4161       // If this is a scalar constant or a splat vector of constants,
4162       // handle it quickly.
4163       const auto *C = dyn_cast<Constant>(Idx);
4164       if (C && isa<VectorType>(C->getType()))
4165         C = C->getSplatValue();
4166 
4167       const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4168       if (CI && CI->isZero())
4169         continue;
4170       if (CI && !ElementScalable) {
4171         APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4172         LLVMContext &Context = *DAG.getContext();
4173         SDValue OffsVal;
4174         if (IsVectorGEP)
4175           OffsVal = DAG.getConstant(
4176               Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4177         else
4178           OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4179 
4180         // In an inbounds GEP with an offset that is nonnegative even when
4181         // interpreted as signed, assume there is no unsigned overflow.
4182         SDNodeFlags Flags;
4183         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
4184           Flags.setNoUnsignedWrap(true);
4185 
4186         OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4187 
4188         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
4189         continue;
4190       }
4191 
4192       // N = N + Idx * ElementMul;
4193       SDValue IdxN = getValue(Idx);
4194 
4195       if (!IdxN.getValueType().isVector() && IsVectorGEP) {
4196         EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(),
4197                                   VectorElementCount);
4198         IdxN = DAG.getSplat(VT, dl, IdxN);
4199       }
4200 
4201       // If the index is smaller or larger than intptr_t, truncate or extend
4202       // it.
4203       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4204 
4205       if (ElementScalable) {
4206         EVT VScaleTy = N.getValueType().getScalarType();
4207         SDValue VScale = DAG.getNode(
4208             ISD::VSCALE, dl, VScaleTy,
4209             DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4210         if (IsVectorGEP)
4211           VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4212         IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
4213       } else {
4214         // If this is a multiply by a power of two, turn it into a shl
4215         // immediately.  This is a very common case.
4216         if (ElementMul != 1) {
4217           if (ElementMul.isPowerOf2()) {
4218             unsigned Amt = ElementMul.logBase2();
4219             IdxN = DAG.getNode(ISD::SHL, dl,
4220                                N.getValueType(), IdxN,
4221                                DAG.getConstant(Amt, dl, IdxN.getValueType()));
4222           } else {
4223             SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
4224                                             IdxN.getValueType());
4225             IdxN = DAG.getNode(ISD::MUL, dl,
4226                                N.getValueType(), IdxN, Scale);
4227           }
4228         }
4229       }
4230 
4231       N = DAG.getNode(ISD::ADD, dl,
4232                       N.getValueType(), N, IdxN);
4233     }
4234   }
4235 
4236   MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
4237   MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
4238   if (IsVectorGEP) {
4239     PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
4240     PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
4241   }
4242 
4243   if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4244     N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4245 
4246   setValue(&I, N);
4247 }
4248 
4249 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4250   // If this is a fixed sized alloca in the entry block of the function,
4251   // allocate it statically on the stack.
4252   if (FuncInfo.StaticAllocaMap.count(&I))
4253     return;   // getValue will auto-populate this.
4254 
4255   SDLoc dl = getCurSDLoc();
4256   Type *Ty = I.getAllocatedType();
4257   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4258   auto &DL = DAG.getDataLayout();
4259   TypeSize TySize = DL.getTypeAllocSize(Ty);
4260   MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
4261 
4262   SDValue AllocSize = getValue(I.getArraySize());
4263 
4264   EVT IntPtr = TLI.getPointerTy(DL, I.getAddressSpace());
4265   if (AllocSize.getValueType() != IntPtr)
4266     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4267 
4268   if (TySize.isScalable())
4269     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4270                             DAG.getVScale(dl, IntPtr,
4271                                           APInt(IntPtr.getScalarSizeInBits(),
4272                                                 TySize.getKnownMinValue())));
4273   else {
4274     SDValue TySizeValue =
4275         DAG.getConstant(TySize.getFixedValue(), dl, MVT::getIntegerVT(64));
4276     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4277                             DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4278   }
4279 
4280   // Handle alignment.  If the requested alignment is less than or equal to
4281   // the stack alignment, ignore it.  If the size is greater than or equal to
4282   // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
4283   Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4284   if (*Alignment <= StackAlign)
4285     Alignment = std::nullopt;
4286 
4287   const uint64_t StackAlignMask = StackAlign.value() - 1U;
4288   // Round the size of the allocation up to the stack alignment size
4289   // by add SA-1 to the size. This doesn't overflow because we're computing
4290   // an address inside an alloca.
4291   SDNodeFlags Flags;
4292   Flags.setNoUnsignedWrap(true);
4293   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4294                           DAG.getConstant(StackAlignMask, dl, IntPtr), Flags);
4295 
4296   // Mask out the low bits for alignment purposes.
4297   AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4298                           DAG.getConstant(~StackAlignMask, dl, IntPtr));
4299 
4300   SDValue Ops[] = {
4301       getRoot(), AllocSize,
4302       DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4303   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4304   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4305   setValue(&I, DSA);
4306   DAG.setRoot(DSA.getValue(1));
4307 
4308   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4309 }
4310 
4311 static const MDNode *getRangeMetadata(const Instruction &I) {
4312   // If !noundef is not present, then !range violation results in a poison
4313   // value rather than immediate undefined behavior. In theory, transferring
4314   // these annotations to SDAG is fine, but in practice there are key SDAG
4315   // transforms that are known not to be poison-safe, such as folding logical
4316   // and/or to bitwise and/or. For now, only transfer !range if !noundef is
4317   // also present.
4318   if (!I.hasMetadata(LLVMContext::MD_noundef))
4319     return nullptr;
4320   return I.getMetadata(LLVMContext::MD_range);
4321 }
4322 
4323 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4324   if (I.isAtomic())
4325     return visitAtomicLoad(I);
4326 
4327   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4328   const Value *SV = I.getOperand(0);
4329   if (TLI.supportSwiftError()) {
4330     // Swifterror values can come from either a function parameter with
4331     // swifterror attribute or an alloca with swifterror attribute.
4332     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4333       if (Arg->hasSwiftErrorAttr())
4334         return visitLoadFromSwiftError(I);
4335     }
4336 
4337     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4338       if (Alloca->isSwiftError())
4339         return visitLoadFromSwiftError(I);
4340     }
4341   }
4342 
4343   SDValue Ptr = getValue(SV);
4344 
4345   Type *Ty = I.getType();
4346   SmallVector<EVT, 4> ValueVTs, MemVTs;
4347   SmallVector<TypeSize, 4> Offsets;
4348   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
4349   unsigned NumValues = ValueVTs.size();
4350   if (NumValues == 0)
4351     return;
4352 
4353   Align Alignment = I.getAlign();
4354   AAMDNodes AAInfo = I.getAAMetadata();
4355   const MDNode *Ranges = getRangeMetadata(I);
4356   bool isVolatile = I.isVolatile();
4357   MachineMemOperand::Flags MMOFlags =
4358       TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4359 
4360   SDValue Root;
4361   bool ConstantMemory = false;
4362   if (isVolatile)
4363     // Serialize volatile loads with other side effects.
4364     Root = getRoot();
4365   else if (NumValues > MaxParallelChains)
4366     Root = getMemoryRoot();
4367   else if (AA &&
4368            AA->pointsToConstantMemory(MemoryLocation(
4369                SV,
4370                LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4371                AAInfo))) {
4372     // Do not serialize (non-volatile) loads of constant memory with anything.
4373     Root = DAG.getEntryNode();
4374     ConstantMemory = true;
4375     MMOFlags |= MachineMemOperand::MOInvariant;
4376   } else {
4377     // Do not serialize non-volatile loads against each other.
4378     Root = DAG.getRoot();
4379   }
4380 
4381   SDLoc dl = getCurSDLoc();
4382 
4383   if (isVolatile)
4384     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4385 
4386   SmallVector<SDValue, 4> Values(NumValues);
4387   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4388 
4389   unsigned ChainI = 0;
4390   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4391     // Serializing loads here may result in excessive register pressure, and
4392     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4393     // could recover a bit by hoisting nodes upward in the chain by recognizing
4394     // they are side-effect free or do not alias. The optimizer should really
4395     // avoid this case by converting large object/array copies to llvm.memcpy
4396     // (MaxParallelChains should always remain as failsafe).
4397     if (ChainI == MaxParallelChains) {
4398       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4399       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4400                                   ArrayRef(Chains.data(), ChainI));
4401       Root = Chain;
4402       ChainI = 0;
4403     }
4404 
4405     // TODO: MachinePointerInfo only supports a fixed length offset.
4406     MachinePointerInfo PtrInfo =
4407         !Offsets[i].isScalable() || Offsets[i].isZero()
4408             ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4409             : MachinePointerInfo();
4410 
4411     SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4412     SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment,
4413                             MMOFlags, AAInfo, Ranges);
4414     Chains[ChainI] = L.getValue(1);
4415 
4416     if (MemVTs[i] != ValueVTs[i])
4417       L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4418 
4419     Values[i] = L;
4420   }
4421 
4422   if (!ConstantMemory) {
4423     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4424                                 ArrayRef(Chains.data(), ChainI));
4425     if (isVolatile)
4426       DAG.setRoot(Chain);
4427     else
4428       PendingLoads.push_back(Chain);
4429   }
4430 
4431   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4432                            DAG.getVTList(ValueVTs), Values));
4433 }
4434 
4435 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4436   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4437          "call visitStoreToSwiftError when backend supports swifterror");
4438 
4439   SmallVector<EVT, 4> ValueVTs;
4440   SmallVector<uint64_t, 4> Offsets;
4441   const Value *SrcV = I.getOperand(0);
4442   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4443                   SrcV->getType(), ValueVTs, &Offsets, 0);
4444   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4445          "expect a single EVT for swifterror");
4446 
4447   SDValue Src = getValue(SrcV);
4448   // Create a virtual register, then update the virtual register.
4449   Register VReg =
4450       SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4451   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4452   // Chain can be getRoot or getControlRoot.
4453   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4454                                       SDValue(Src.getNode(), Src.getResNo()));
4455   DAG.setRoot(CopyNode);
4456 }
4457 
4458 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4459   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4460          "call visitLoadFromSwiftError when backend supports swifterror");
4461 
4462   assert(!I.isVolatile() &&
4463          !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4464          !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4465          "Support volatile, non temporal, invariant for load_from_swift_error");
4466 
4467   const Value *SV = I.getOperand(0);
4468   Type *Ty = I.getType();
4469   assert(
4470       (!AA ||
4471        !AA->pointsToConstantMemory(MemoryLocation(
4472            SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4473            I.getAAMetadata()))) &&
4474       "load_from_swift_error should not be constant memory");
4475 
4476   SmallVector<EVT, 4> ValueVTs;
4477   SmallVector<uint64_t, 4> Offsets;
4478   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4479                   ValueVTs, &Offsets, 0);
4480   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4481          "expect a single EVT for swifterror");
4482 
4483   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4484   SDValue L = DAG.getCopyFromReg(
4485       getRoot(), getCurSDLoc(),
4486       SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4487 
4488   setValue(&I, L);
4489 }
4490 
4491 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4492   if (I.isAtomic())
4493     return visitAtomicStore(I);
4494 
4495   const Value *SrcV = I.getOperand(0);
4496   const Value *PtrV = I.getOperand(1);
4497 
4498   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4499   if (TLI.supportSwiftError()) {
4500     // Swifterror values can come from either a function parameter with
4501     // swifterror attribute or an alloca with swifterror attribute.
4502     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4503       if (Arg->hasSwiftErrorAttr())
4504         return visitStoreToSwiftError(I);
4505     }
4506 
4507     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4508       if (Alloca->isSwiftError())
4509         return visitStoreToSwiftError(I);
4510     }
4511   }
4512 
4513   SmallVector<EVT, 4> ValueVTs, MemVTs;
4514   SmallVector<TypeSize, 4> Offsets;
4515   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4516                   SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4517   unsigned NumValues = ValueVTs.size();
4518   if (NumValues == 0)
4519     return;
4520 
4521   // Get the lowered operands. Note that we do this after
4522   // checking if NumResults is zero, because with zero results
4523   // the operands won't have values in the map.
4524   SDValue Src = getValue(SrcV);
4525   SDValue Ptr = getValue(PtrV);
4526 
4527   SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4528   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4529   SDLoc dl = getCurSDLoc();
4530   Align Alignment = I.getAlign();
4531   AAMDNodes AAInfo = I.getAAMetadata();
4532 
4533   auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4534 
4535   unsigned ChainI = 0;
4536   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4537     // See visitLoad comments.
4538     if (ChainI == MaxParallelChains) {
4539       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4540                                   ArrayRef(Chains.data(), ChainI));
4541       Root = Chain;
4542       ChainI = 0;
4543     }
4544 
4545     // TODO: MachinePointerInfo only supports a fixed length offset.
4546     MachinePointerInfo PtrInfo =
4547         !Offsets[i].isScalable() || Offsets[i].isZero()
4548             ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4549             : MachinePointerInfo();
4550 
4551     SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4552     SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4553     if (MemVTs[i] != ValueVTs[i])
4554       Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4555     SDValue St =
4556         DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4557     Chains[ChainI] = St;
4558   }
4559 
4560   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4561                                   ArrayRef(Chains.data(), ChainI));
4562   setValue(&I, StoreNode);
4563   DAG.setRoot(StoreNode);
4564 }
4565 
4566 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4567                                            bool IsCompressing) {
4568   SDLoc sdl = getCurSDLoc();
4569 
4570   auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4571                                MaybeAlign &Alignment) {
4572     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4573     Src0 = I.getArgOperand(0);
4574     Ptr = I.getArgOperand(1);
4575     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue();
4576     Mask = I.getArgOperand(3);
4577   };
4578   auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4579                                     MaybeAlign &Alignment) {
4580     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4581     Src0 = I.getArgOperand(0);
4582     Ptr = I.getArgOperand(1);
4583     Mask = I.getArgOperand(2);
4584     Alignment = std::nullopt;
4585   };
4586 
4587   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4588   MaybeAlign Alignment;
4589   if (IsCompressing)
4590     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4591   else
4592     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4593 
4594   SDValue Ptr = getValue(PtrOperand);
4595   SDValue Src0 = getValue(Src0Operand);
4596   SDValue Mask = getValue(MaskOperand);
4597   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4598 
4599   EVT VT = Src0.getValueType();
4600   if (!Alignment)
4601     Alignment = DAG.getEVTAlign(VT);
4602 
4603   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4604       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
4605       MemoryLocation::UnknownSize, *Alignment, I.getAAMetadata());
4606   SDValue StoreNode =
4607       DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4608                          ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4609   DAG.setRoot(StoreNode);
4610   setValue(&I, StoreNode);
4611 }
4612 
4613 // Get a uniform base for the Gather/Scatter intrinsic.
4614 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4615 // We try to represent it as a base pointer + vector of indices.
4616 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4617 // The first operand of the GEP may be a single pointer or a vector of pointers
4618 // Example:
4619 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4620 //  or
4621 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
4622 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4623 //
4624 // When the first GEP operand is a single pointer - it is the uniform base we
4625 // are looking for. If first operand of the GEP is a splat vector - we
4626 // extract the splat value and use it as a uniform base.
4627 // In all other cases the function returns 'false'.
4628 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4629                            ISD::MemIndexType &IndexType, SDValue &Scale,
4630                            SelectionDAGBuilder *SDB, const BasicBlock *CurBB,
4631                            uint64_t ElemSize) {
4632   SelectionDAG& DAG = SDB->DAG;
4633   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4634   const DataLayout &DL = DAG.getDataLayout();
4635 
4636   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4637 
4638   // Handle splat constant pointer.
4639   if (auto *C = dyn_cast<Constant>(Ptr)) {
4640     C = C->getSplatValue();
4641     if (!C)
4642       return false;
4643 
4644     Base = SDB->getValue(C);
4645 
4646     ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4647     EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4648     Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4649     IndexType = ISD::SIGNED_SCALED;
4650     Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4651     return true;
4652   }
4653 
4654   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4655   if (!GEP || GEP->getParent() != CurBB)
4656     return false;
4657 
4658   if (GEP->getNumOperands() != 2)
4659     return false;
4660 
4661   const Value *BasePtr = GEP->getPointerOperand();
4662   const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4663 
4664   // Make sure the base is scalar and the index is a vector.
4665   if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4666     return false;
4667 
4668   TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
4669   if (ScaleVal.isScalable())
4670     return false;
4671 
4672   // Target may not support the required addressing mode.
4673   if (ScaleVal != 1 &&
4674       !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize))
4675     return false;
4676 
4677   Base = SDB->getValue(BasePtr);
4678   Index = SDB->getValue(IndexVal);
4679   IndexType = ISD::SIGNED_SCALED;
4680 
4681   Scale =
4682       DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4683   return true;
4684 }
4685 
4686 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4687   SDLoc sdl = getCurSDLoc();
4688 
4689   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4690   const Value *Ptr = I.getArgOperand(1);
4691   SDValue Src0 = getValue(I.getArgOperand(0));
4692   SDValue Mask = getValue(I.getArgOperand(3));
4693   EVT VT = Src0.getValueType();
4694   Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4695                         ->getMaybeAlignValue()
4696                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4697   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4698 
4699   SDValue Base;
4700   SDValue Index;
4701   ISD::MemIndexType IndexType;
4702   SDValue Scale;
4703   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4704                                     I.getParent(), VT.getScalarStoreSize());
4705 
4706   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4707   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4708       MachinePointerInfo(AS), MachineMemOperand::MOStore,
4709       // TODO: Make MachineMemOperands aware of scalable
4710       // vectors.
4711       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata());
4712   if (!UniformBase) {
4713     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4714     Index = getValue(Ptr);
4715     IndexType = ISD::SIGNED_SCALED;
4716     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4717   }
4718 
4719   EVT IdxVT = Index.getValueType();
4720   EVT EltTy = IdxVT.getVectorElementType();
4721   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4722     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4723     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4724   }
4725 
4726   SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4727   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4728                                          Ops, MMO, IndexType, false);
4729   DAG.setRoot(Scatter);
4730   setValue(&I, Scatter);
4731 }
4732 
4733 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4734   SDLoc sdl = getCurSDLoc();
4735 
4736   auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4737                               MaybeAlign &Alignment) {
4738     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4739     Ptr = I.getArgOperand(0);
4740     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4741     Mask = I.getArgOperand(2);
4742     Src0 = I.getArgOperand(3);
4743   };
4744   auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4745                                  MaybeAlign &Alignment) {
4746     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4747     Ptr = I.getArgOperand(0);
4748     Alignment = std::nullopt;
4749     Mask = I.getArgOperand(1);
4750     Src0 = I.getArgOperand(2);
4751   };
4752 
4753   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4754   MaybeAlign Alignment;
4755   if (IsExpanding)
4756     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4757   else
4758     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4759 
4760   SDValue Ptr = getValue(PtrOperand);
4761   SDValue Src0 = getValue(Src0Operand);
4762   SDValue Mask = getValue(MaskOperand);
4763   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4764 
4765   EVT VT = Src0.getValueType();
4766   if (!Alignment)
4767     Alignment = DAG.getEVTAlign(VT);
4768 
4769   AAMDNodes AAInfo = I.getAAMetadata();
4770   const MDNode *Ranges = getRangeMetadata(I);
4771 
4772   // Do not serialize masked loads of constant memory with anything.
4773   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
4774   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4775 
4776   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4777 
4778   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4779       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
4780       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
4781 
4782   SDValue Load =
4783       DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4784                         ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4785   if (AddToChain)
4786     PendingLoads.push_back(Load.getValue(1));
4787   setValue(&I, Load);
4788 }
4789 
4790 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4791   SDLoc sdl = getCurSDLoc();
4792 
4793   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4794   const Value *Ptr = I.getArgOperand(0);
4795   SDValue Src0 = getValue(I.getArgOperand(3));
4796   SDValue Mask = getValue(I.getArgOperand(2));
4797 
4798   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4799   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4800   Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
4801                         ->getMaybeAlignValue()
4802                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4803 
4804   const MDNode *Ranges = getRangeMetadata(I);
4805 
4806   SDValue Root = DAG.getRoot();
4807   SDValue Base;
4808   SDValue Index;
4809   ISD::MemIndexType IndexType;
4810   SDValue Scale;
4811   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4812                                     I.getParent(), VT.getScalarStoreSize());
4813   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4814   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4815       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
4816       // TODO: Make MachineMemOperands aware of scalable
4817       // vectors.
4818       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);
4819 
4820   if (!UniformBase) {
4821     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4822     Index = getValue(Ptr);
4823     IndexType = ISD::SIGNED_SCALED;
4824     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4825   }
4826 
4827   EVT IdxVT = Index.getValueType();
4828   EVT EltTy = IdxVT.getVectorElementType();
4829   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4830     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4831     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4832   }
4833 
4834   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4835   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4836                                        Ops, MMO, IndexType, ISD::NON_EXTLOAD);
4837 
4838   PendingLoads.push_back(Gather.getValue(1));
4839   setValue(&I, Gather);
4840 }
4841 
4842 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4843   SDLoc dl = getCurSDLoc();
4844   AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4845   AtomicOrdering FailureOrdering = I.getFailureOrdering();
4846   SyncScope::ID SSID = I.getSyncScopeID();
4847 
4848   SDValue InChain = getRoot();
4849 
4850   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4851   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4852 
4853   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4854   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4855 
4856   MachineFunction &MF = DAG.getMachineFunction();
4857   MachineMemOperand *MMO = MF.getMachineMemOperand(
4858       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4859       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
4860       FailureOrdering);
4861 
4862   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4863                                    dl, MemVT, VTs, InChain,
4864                                    getValue(I.getPointerOperand()),
4865                                    getValue(I.getCompareOperand()),
4866                                    getValue(I.getNewValOperand()), MMO);
4867 
4868   SDValue OutChain = L.getValue(2);
4869 
4870   setValue(&I, L);
4871   DAG.setRoot(OutChain);
4872 }
4873 
4874 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4875   SDLoc dl = getCurSDLoc();
4876   ISD::NodeType NT;
4877   switch (I.getOperation()) {
4878   default: llvm_unreachable("Unknown atomicrmw operation");
4879   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4880   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
4881   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
4882   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
4883   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4884   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
4885   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
4886   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
4887   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
4888   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4889   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4890   case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4891   case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4892   case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break;
4893   case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break;
4894   case AtomicRMWInst::UIncWrap:
4895     NT = ISD::ATOMIC_LOAD_UINC_WRAP;
4896     break;
4897   case AtomicRMWInst::UDecWrap:
4898     NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
4899     break;
4900   }
4901   AtomicOrdering Ordering = I.getOrdering();
4902   SyncScope::ID SSID = I.getSyncScopeID();
4903 
4904   SDValue InChain = getRoot();
4905 
4906   auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4907   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4908   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4909 
4910   MachineFunction &MF = DAG.getMachineFunction();
4911   MachineMemOperand *MMO = MF.getMachineMemOperand(
4912       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4913       DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
4914 
4915   SDValue L =
4916     DAG.getAtomic(NT, dl, MemVT, InChain,
4917                   getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4918                   MMO);
4919 
4920   SDValue OutChain = L.getValue(1);
4921 
4922   setValue(&I, L);
4923   DAG.setRoot(OutChain);
4924 }
4925 
4926 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4927   SDLoc dl = getCurSDLoc();
4928   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4929   SDValue Ops[3];
4930   Ops[0] = getRoot();
4931   Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4932                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4933   Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4934                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
4935   SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
4936   setValue(&I, N);
4937   DAG.setRoot(N);
4938 }
4939 
4940 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4941   SDLoc dl = getCurSDLoc();
4942   AtomicOrdering Order = I.getOrdering();
4943   SyncScope::ID SSID = I.getSyncScopeID();
4944 
4945   SDValue InChain = getRoot();
4946 
4947   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4948   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4949   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4950 
4951   if (!TLI.supportsUnalignedAtomics() &&
4952       I.getAlign().value() < MemVT.getSizeInBits() / 8)
4953     report_fatal_error("Cannot generate unaligned atomic load");
4954 
4955   auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4956 
4957   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4958       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4959       I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
4960 
4961   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4962 
4963   SDValue Ptr = getValue(I.getPointerOperand());
4964   SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4965                             Ptr, MMO);
4966 
4967   SDValue OutChain = L.getValue(1);
4968   if (MemVT != VT)
4969     L = DAG.getPtrExtOrTrunc(L, dl, VT);
4970 
4971   setValue(&I, L);
4972   DAG.setRoot(OutChain);
4973 }
4974 
4975 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4976   SDLoc dl = getCurSDLoc();
4977 
4978   AtomicOrdering Ordering = I.getOrdering();
4979   SyncScope::ID SSID = I.getSyncScopeID();
4980 
4981   SDValue InChain = getRoot();
4982 
4983   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4984   EVT MemVT =
4985       TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4986 
4987   if (!TLI.supportsUnalignedAtomics() &&
4988       I.getAlign().value() < MemVT.getSizeInBits() / 8)
4989     report_fatal_error("Cannot generate unaligned atomic store");
4990 
4991   auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4992 
4993   MachineFunction &MF = DAG.getMachineFunction();
4994   MachineMemOperand *MMO = MF.getMachineMemOperand(
4995       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4996       I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
4997 
4998   SDValue Val = getValue(I.getValueOperand());
4999   if (Val.getValueType() != MemVT)
5000     Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5001   SDValue Ptr = getValue(I.getPointerOperand());
5002 
5003   SDValue OutChain =
5004       DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
5005 
5006   setValue(&I, OutChain);
5007   DAG.setRoot(OutChain);
5008 }
5009 
5010 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
5011 /// node.
5012 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
5013                                                unsigned Intrinsic) {
5014   // Ignore the callsite's attributes. A specific call site may be marked with
5015   // readnone, but the lowering code will expect the chain based on the
5016   // definition.
5017   const Function *F = I.getCalledFunction();
5018   bool HasChain = !F->doesNotAccessMemory();
5019   bool OnlyLoad = HasChain && F->onlyReadsMemory();
5020 
5021   // Build the operand list.
5022   SmallVector<SDValue, 8> Ops;
5023   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
5024     if (OnlyLoad) {
5025       // We don't need to serialize loads against other loads.
5026       Ops.push_back(DAG.getRoot());
5027     } else {
5028       Ops.push_back(getRoot());
5029     }
5030   }
5031 
5032   // Info is set by getTgtMemIntrinsic
5033   TargetLowering::IntrinsicInfo Info;
5034   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5035   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
5036                                                DAG.getMachineFunction(),
5037                                                Intrinsic);
5038 
5039   // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
5040   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
5041       Info.opc == ISD::INTRINSIC_W_CHAIN)
5042     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
5043                                         TLI.getPointerTy(DAG.getDataLayout())));
5044 
5045   // Add all operands of the call to the operand list.
5046   for (unsigned i = 0, e = I.arg_size(); i != e; ++i) {
5047     const Value *Arg = I.getArgOperand(i);
5048     if (!I.paramHasAttr(i, Attribute::ImmArg)) {
5049       Ops.push_back(getValue(Arg));
5050       continue;
5051     }
5052 
5053     // Use TargetConstant instead of a regular constant for immarg.
5054     EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
5055     if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5056       assert(CI->getBitWidth() <= 64 &&
5057              "large intrinsic immediates not handled");
5058       Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
5059     } else {
5060       Ops.push_back(
5061           DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
5062     }
5063   }
5064 
5065   SmallVector<EVT, 4> ValueVTs;
5066   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
5067 
5068   if (HasChain)
5069     ValueVTs.push_back(MVT::Other);
5070 
5071   SDVTList VTs = DAG.getVTList(ValueVTs);
5072 
5073   // Propagate fast-math-flags from IR to node(s).
5074   SDNodeFlags Flags;
5075   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
5076     Flags.copyFMF(*FPMO);
5077   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
5078 
5079   // Create the node.
5080   SDValue Result;
5081   // In some cases, custom collection of operands from CallInst I may be needed.
5082   TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
5083   if (IsTgtIntrinsic) {
5084     // This is target intrinsic that touches memory
5085     //
5086     // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
5087     //       didn't yield anything useful.
5088     MachinePointerInfo MPI;
5089     if (Info.ptrVal)
5090       MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
5091     else if (Info.fallbackAddressSpace)
5092       MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
5093     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops,
5094                                      Info.memVT, MPI, Info.align, Info.flags,
5095                                      Info.size, I.getAAMetadata());
5096   } else if (!HasChain) {
5097     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
5098   } else if (!I.getType()->isVoidTy()) {
5099     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
5100   } else {
5101     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
5102   }
5103 
5104   if (HasChain) {
5105     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
5106     if (OnlyLoad)
5107       PendingLoads.push_back(Chain);
5108     else
5109       DAG.setRoot(Chain);
5110   }
5111 
5112   if (!I.getType()->isVoidTy()) {
5113     if (!isa<VectorType>(I.getType()))
5114       Result = lowerRangeToAssertZExt(DAG, I, Result);
5115 
5116     MaybeAlign Alignment = I.getRetAlign();
5117 
5118     // Insert `assertalign` node if there's an alignment.
5119     if (InsertAssertAlign && Alignment) {
5120       Result =
5121           DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
5122     }
5123 
5124     setValue(&I, Result);
5125   }
5126 }
5127 
5128 /// GetSignificand - Get the significand and build it into a floating-point
5129 /// number with exponent of 1:
5130 ///
5131 ///   Op = (Op & 0x007fffff) | 0x3f800000;
5132 ///
5133 /// where Op is the hexadecimal representation of floating point value.
5134 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
5135   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5136                            DAG.getConstant(0x007fffff, dl, MVT::i32));
5137   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
5138                            DAG.getConstant(0x3f800000, dl, MVT::i32));
5139   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
5140 }
5141 
5142 /// GetExponent - Get the exponent:
5143 ///
5144 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
5145 ///
5146 /// where Op is the hexadecimal representation of floating point value.
5147 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
5148                            const TargetLowering &TLI, const SDLoc &dl) {
5149   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5150                            DAG.getConstant(0x7f800000, dl, MVT::i32));
5151   SDValue t1 = DAG.getNode(
5152       ISD::SRL, dl, MVT::i32, t0,
5153       DAG.getConstant(23, dl,
5154                       TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
5155   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
5156                            DAG.getConstant(127, dl, MVT::i32));
5157   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
5158 }
5159 
5160 /// getF32Constant - Get 32-bit floating point constant.
5161 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
5162                               const SDLoc &dl) {
5163   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
5164                            MVT::f32);
5165 }
5166 
5167 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
5168                                        SelectionDAG &DAG) {
5169   // TODO: What fast-math-flags should be set on the floating-point nodes?
5170 
5171   //   IntegerPartOfX = ((int32_t)(t0);
5172   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
5173 
5174   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
5175   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
5176   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
5177 
5178   //   IntegerPartOfX <<= 23;
5179   IntegerPartOfX =
5180       DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
5181                   DAG.getConstant(23, dl,
5182                                   DAG.getTargetLoweringInfo().getShiftAmountTy(
5183                                       MVT::i32, DAG.getDataLayout())));
5184 
5185   SDValue TwoToFractionalPartOfX;
5186   if (LimitFloatPrecision <= 6) {
5187     // For floating-point precision of 6:
5188     //
5189     //   TwoToFractionalPartOfX =
5190     //     0.997535578f +
5191     //       (0.735607626f + 0.252464424f * x) * x;
5192     //
5193     // error 0.0144103317, which is 6 bits
5194     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5195                              getF32Constant(DAG, 0x3e814304, dl));
5196     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5197                              getF32Constant(DAG, 0x3f3c50c8, dl));
5198     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5199     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5200                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
5201   } else if (LimitFloatPrecision <= 12) {
5202     // For floating-point precision of 12:
5203     //
5204     //   TwoToFractionalPartOfX =
5205     //     0.999892986f +
5206     //       (0.696457318f +
5207     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
5208     //
5209     // error 0.000107046256, which is 13 to 14 bits
5210     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5211                              getF32Constant(DAG, 0x3da235e3, dl));
5212     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5213                              getF32Constant(DAG, 0x3e65b8f3, dl));
5214     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5215     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5216                              getF32Constant(DAG, 0x3f324b07, dl));
5217     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5218     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5219                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
5220   } else { // LimitFloatPrecision <= 18
5221     // For floating-point precision of 18:
5222     //
5223     //   TwoToFractionalPartOfX =
5224     //     0.999999982f +
5225     //       (0.693148872f +
5226     //         (0.240227044f +
5227     //           (0.554906021e-1f +
5228     //             (0.961591928e-2f +
5229     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5230     // error 2.47208000*10^(-7), which is better than 18 bits
5231     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5232                              getF32Constant(DAG, 0x3924b03e, dl));
5233     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5234                              getF32Constant(DAG, 0x3ab24b87, dl));
5235     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5236     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5237                              getF32Constant(DAG, 0x3c1d8c17, dl));
5238     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5239     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5240                              getF32Constant(DAG, 0x3d634a1d, dl));
5241     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5242     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5243                              getF32Constant(DAG, 0x3e75fe14, dl));
5244     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5245     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5246                               getF32Constant(DAG, 0x3f317234, dl));
5247     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5248     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
5249                                          getF32Constant(DAG, 0x3f800000, dl));
5250   }
5251 
5252   // Add the exponent into the result in integer domain.
5253   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5254   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5255                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5256 }
5257 
5258 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
5259 /// limited-precision mode.
5260 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5261                          const TargetLowering &TLI, SDNodeFlags Flags) {
5262   if (Op.getValueType() == MVT::f32 &&
5263       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5264 
5265     // Put the exponent in the right bit position for later addition to the
5266     // final result:
5267     //
5268     // t0 = Op * log2(e)
5269 
5270     // TODO: What fast-math-flags should be set here?
5271     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5272                              DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5273     return getLimitedPrecisionExp2(t0, dl, DAG);
5274   }
5275 
5276   // No special expansion.
5277   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
5278 }
5279 
5280 /// expandLog - Lower a log intrinsic. Handles the special sequences for
5281 /// limited-precision mode.
5282 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5283                          const TargetLowering &TLI, SDNodeFlags Flags) {
5284   // TODO: What fast-math-flags should be set on the floating-point nodes?
5285 
5286   if (Op.getValueType() == MVT::f32 &&
5287       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5288     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5289 
5290     // Scale the exponent by log(2).
5291     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5292     SDValue LogOfExponent =
5293         DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5294                     DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
5295 
5296     // Get the significand and build it into a floating-point number with
5297     // exponent of 1.
5298     SDValue X = GetSignificand(DAG, Op1, dl);
5299 
5300     SDValue LogOfMantissa;
5301     if (LimitFloatPrecision <= 6) {
5302       // For floating-point precision of 6:
5303       //
5304       //   LogofMantissa =
5305       //     -1.1609546f +
5306       //       (1.4034025f - 0.23903021f * x) * x;
5307       //
5308       // error 0.0034276066, which is better than 8 bits
5309       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5310                                getF32Constant(DAG, 0xbe74c456, dl));
5311       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5312                                getF32Constant(DAG, 0x3fb3a2b1, dl));
5313       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5314       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5315                                   getF32Constant(DAG, 0x3f949a29, dl));
5316     } else if (LimitFloatPrecision <= 12) {
5317       // For floating-point precision of 12:
5318       //
5319       //   LogOfMantissa =
5320       //     -1.7417939f +
5321       //       (2.8212026f +
5322       //         (-1.4699568f +
5323       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5324       //
5325       // error 0.000061011436, which is 14 bits
5326       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5327                                getF32Constant(DAG, 0xbd67b6d6, dl));
5328       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5329                                getF32Constant(DAG, 0x3ee4f4b8, dl));
5330       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5331       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5332                                getF32Constant(DAG, 0x3fbc278b, dl));
5333       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5334       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5335                                getF32Constant(DAG, 0x40348e95, dl));
5336       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5337       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5338                                   getF32Constant(DAG, 0x3fdef31a, dl));
5339     } else { // LimitFloatPrecision <= 18
5340       // For floating-point precision of 18:
5341       //
5342       //   LogOfMantissa =
5343       //     -2.1072184f +
5344       //       (4.2372794f +
5345       //         (-3.7029485f +
5346       //           (2.2781945f +
5347       //             (-0.87823314f +
5348       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5349       //
5350       // error 0.0000023660568, which is better than 18 bits
5351       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5352                                getF32Constant(DAG, 0xbc91e5ac, dl));
5353       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5354                                getF32Constant(DAG, 0x3e4350aa, dl));
5355       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5356       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5357                                getF32Constant(DAG, 0x3f60d3e3, dl));
5358       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5359       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5360                                getF32Constant(DAG, 0x4011cdf0, dl));
5361       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5362       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5363                                getF32Constant(DAG, 0x406cfd1c, dl));
5364       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5365       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5366                                getF32Constant(DAG, 0x408797cb, dl));
5367       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5368       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5369                                   getF32Constant(DAG, 0x4006dcab, dl));
5370     }
5371 
5372     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5373   }
5374 
5375   // No special expansion.
5376   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
5377 }
5378 
5379 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5380 /// limited-precision mode.
5381 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5382                           const TargetLowering &TLI, SDNodeFlags Flags) {
5383   // TODO: What fast-math-flags should be set on the floating-point nodes?
5384 
5385   if (Op.getValueType() == MVT::f32 &&
5386       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5387     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5388 
5389     // Get the exponent.
5390     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5391 
5392     // Get the significand and build it into a floating-point number with
5393     // exponent of 1.
5394     SDValue X = GetSignificand(DAG, Op1, dl);
5395 
5396     // Different possible minimax approximations of significand in
5397     // floating-point for various degrees of accuracy over [1,2].
5398     SDValue Log2ofMantissa;
5399     if (LimitFloatPrecision <= 6) {
5400       // For floating-point precision of 6:
5401       //
5402       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5403       //
5404       // error 0.0049451742, which is more than 7 bits
5405       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5406                                getF32Constant(DAG, 0xbeb08fe0, dl));
5407       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5408                                getF32Constant(DAG, 0x40019463, dl));
5409       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5410       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5411                                    getF32Constant(DAG, 0x3fd6633d, dl));
5412     } else if (LimitFloatPrecision <= 12) {
5413       // For floating-point precision of 12:
5414       //
5415       //   Log2ofMantissa =
5416       //     -2.51285454f +
5417       //       (4.07009056f +
5418       //         (-2.12067489f +
5419       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5420       //
5421       // error 0.0000876136000, which is better than 13 bits
5422       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5423                                getF32Constant(DAG, 0xbda7262e, dl));
5424       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5425                                getF32Constant(DAG, 0x3f25280b, dl));
5426       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5427       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5428                                getF32Constant(DAG, 0x4007b923, dl));
5429       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5430       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5431                                getF32Constant(DAG, 0x40823e2f, dl));
5432       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5433       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5434                                    getF32Constant(DAG, 0x4020d29c, dl));
5435     } else { // LimitFloatPrecision <= 18
5436       // For floating-point precision of 18:
5437       //
5438       //   Log2ofMantissa =
5439       //     -3.0400495f +
5440       //       (6.1129976f +
5441       //         (-5.3420409f +
5442       //           (3.2865683f +
5443       //             (-1.2669343f +
5444       //               (0.27515199f -
5445       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5446       //
5447       // error 0.0000018516, which is better than 18 bits
5448       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5449                                getF32Constant(DAG, 0xbcd2769e, dl));
5450       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5451                                getF32Constant(DAG, 0x3e8ce0b9, dl));
5452       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5453       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5454                                getF32Constant(DAG, 0x3fa22ae7, dl));
5455       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5456       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5457                                getF32Constant(DAG, 0x40525723, dl));
5458       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5459       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5460                                getF32Constant(DAG, 0x40aaf200, dl));
5461       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5462       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5463                                getF32Constant(DAG, 0x40c39dad, dl));
5464       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5465       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5466                                    getF32Constant(DAG, 0x4042902c, dl));
5467     }
5468 
5469     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5470   }
5471 
5472   // No special expansion.
5473   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5474 }
5475 
5476 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5477 /// limited-precision mode.
5478 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5479                            const TargetLowering &TLI, SDNodeFlags Flags) {
5480   // TODO: What fast-math-flags should be set on the floating-point nodes?
5481 
5482   if (Op.getValueType() == MVT::f32 &&
5483       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5484     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5485 
5486     // Scale the exponent by log10(2) [0.30102999f].
5487     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5488     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5489                                         getF32Constant(DAG, 0x3e9a209a, dl));
5490 
5491     // Get the significand and build it into a floating-point number with
5492     // exponent of 1.
5493     SDValue X = GetSignificand(DAG, Op1, dl);
5494 
5495     SDValue Log10ofMantissa;
5496     if (LimitFloatPrecision <= 6) {
5497       // For floating-point precision of 6:
5498       //
5499       //   Log10ofMantissa =
5500       //     -0.50419619f +
5501       //       (0.60948995f - 0.10380950f * x) * x;
5502       //
5503       // error 0.0014886165, which is 6 bits
5504       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5505                                getF32Constant(DAG, 0xbdd49a13, dl));
5506       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5507                                getF32Constant(DAG, 0x3f1c0789, dl));
5508       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5509       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5510                                     getF32Constant(DAG, 0x3f011300, dl));
5511     } else if (LimitFloatPrecision <= 12) {
5512       // For floating-point precision of 12:
5513       //
5514       //   Log10ofMantissa =
5515       //     -0.64831180f +
5516       //       (0.91751397f +
5517       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5518       //
5519       // error 0.00019228036, which is better than 12 bits
5520       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5521                                getF32Constant(DAG, 0x3d431f31, dl));
5522       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5523                                getF32Constant(DAG, 0x3ea21fb2, dl));
5524       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5525       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5526                                getF32Constant(DAG, 0x3f6ae232, dl));
5527       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5528       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5529                                     getF32Constant(DAG, 0x3f25f7c3, dl));
5530     } else { // LimitFloatPrecision <= 18
5531       // For floating-point precision of 18:
5532       //
5533       //   Log10ofMantissa =
5534       //     -0.84299375f +
5535       //       (1.5327582f +
5536       //         (-1.0688956f +
5537       //           (0.49102474f +
5538       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5539       //
5540       // error 0.0000037995730, which is better than 18 bits
5541       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5542                                getF32Constant(DAG, 0x3c5d51ce, dl));
5543       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5544                                getF32Constant(DAG, 0x3e00685a, dl));
5545       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5546       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5547                                getF32Constant(DAG, 0x3efb6798, dl));
5548       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5549       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5550                                getF32Constant(DAG, 0x3f88d192, dl));
5551       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5552       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5553                                getF32Constant(DAG, 0x3fc4316c, dl));
5554       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5555       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5556                                     getF32Constant(DAG, 0x3f57ce70, dl));
5557     }
5558 
5559     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5560   }
5561 
5562   // No special expansion.
5563   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5564 }
5565 
5566 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5567 /// limited-precision mode.
5568 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5569                           const TargetLowering &TLI, SDNodeFlags Flags) {
5570   if (Op.getValueType() == MVT::f32 &&
5571       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5572     return getLimitedPrecisionExp2(Op, dl, DAG);
5573 
5574   // No special expansion.
5575   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5576 }
5577 
5578 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
5579 /// limited-precision mode with x == 10.0f.
5580 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5581                          SelectionDAG &DAG, const TargetLowering &TLI,
5582                          SDNodeFlags Flags) {
5583   bool IsExp10 = false;
5584   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5585       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5586     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5587       APFloat Ten(10.0f);
5588       IsExp10 = LHSC->isExactlyValue(Ten);
5589     }
5590   }
5591 
5592   // TODO: What fast-math-flags should be set on the FMUL node?
5593   if (IsExp10) {
5594     // Put the exponent in the right bit position for later addition to the
5595     // final result:
5596     //
5597     //   #define LOG2OF10 3.3219281f
5598     //   t0 = Op * LOG2OF10;
5599     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5600                              getF32Constant(DAG, 0x40549a78, dl));
5601     return getLimitedPrecisionExp2(t0, dl, DAG);
5602   }
5603 
5604   // No special expansion.
5605   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5606 }
5607 
5608 /// ExpandPowI - Expand a llvm.powi intrinsic.
5609 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5610                           SelectionDAG &DAG) {
5611   // If RHS is a constant, we can expand this out to a multiplication tree if
5612   // it's beneficial on the target, otherwise we end up lowering to a call to
5613   // __powidf2 (for example).
5614   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5615     unsigned Val = RHSC->getSExtValue();
5616 
5617     // powi(x, 0) -> 1.0
5618     if (Val == 0)
5619       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5620 
5621     if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI(
5622             Val, DAG.shouldOptForSize())) {
5623       // Get the exponent as a positive value.
5624       if ((int)Val < 0)
5625         Val = -Val;
5626       // We use the simple binary decomposition method to generate the multiply
5627       // sequence.  There are more optimal ways to do this (for example,
5628       // powi(x,15) generates one more multiply than it should), but this has
5629       // the benefit of being both really simple and much better than a libcall.
5630       SDValue Res; // Logically starts equal to 1.0
5631       SDValue CurSquare = LHS;
5632       // TODO: Intrinsics should have fast-math-flags that propagate to these
5633       // nodes.
5634       while (Val) {
5635         if (Val & 1) {
5636           if (Res.getNode())
5637             Res =
5638                 DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5639           else
5640             Res = CurSquare; // 1.0*CurSquare.
5641         }
5642 
5643         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5644                                 CurSquare, CurSquare);
5645         Val >>= 1;
5646       }
5647 
5648       // If the original was negative, invert the result, producing 1/(x*x*x).
5649       if (RHSC->getSExtValue() < 0)
5650         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5651                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5652       return Res;
5653     }
5654   }
5655 
5656   // Otherwise, expand to a libcall.
5657   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5658 }
5659 
5660 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5661                             SDValue LHS, SDValue RHS, SDValue Scale,
5662                             SelectionDAG &DAG, const TargetLowering &TLI) {
5663   EVT VT = LHS.getValueType();
5664   bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5665   bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5666   LLVMContext &Ctx = *DAG.getContext();
5667 
5668   // If the type is legal but the operation isn't, this node might survive all
5669   // the way to operation legalization. If we end up there and we do not have
5670   // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5671   // node.
5672 
5673   // Coax the legalizer into expanding the node during type legalization instead
5674   // by bumping the size by one bit. This will force it to Promote, enabling the
5675   // early expansion and avoiding the need to expand later.
5676 
5677   // We don't have to do this if Scale is 0; that can always be expanded, unless
5678   // it's a saturating signed operation. Those can experience true integer
5679   // division overflow, a case which we must avoid.
5680 
5681   // FIXME: We wouldn't have to do this (or any of the early
5682   // expansion/promotion) if it was possible to expand a libcall of an
5683   // illegal type during operation legalization. But it's not, so things
5684   // get a bit hacky.
5685   unsigned ScaleInt = Scale->getAsZExtVal();
5686   if ((ScaleInt > 0 || (Saturating && Signed)) &&
5687       (TLI.isTypeLegal(VT) ||
5688        (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5689     TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5690         Opcode, VT, ScaleInt);
5691     if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5692       EVT PromVT;
5693       if (VT.isScalarInteger())
5694         PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5695       else if (VT.isVector()) {
5696         PromVT = VT.getVectorElementType();
5697         PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5698         PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5699       } else
5700         llvm_unreachable("Wrong VT for DIVFIX?");
5701       LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
5702       RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
5703       EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5704       // For saturating operations, we need to shift up the LHS to get the
5705       // proper saturation width, and then shift down again afterwards.
5706       if (Saturating)
5707         LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5708                           DAG.getConstant(1, DL, ShiftTy));
5709       SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5710       if (Saturating)
5711         Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5712                           DAG.getConstant(1, DL, ShiftTy));
5713       return DAG.getZExtOrTrunc(Res, DL, VT);
5714     }
5715   }
5716 
5717   return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5718 }
5719 
5720 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5721 // bitcasted, or split argument. Returns a list of <Register, size in bits>
5722 static void
5723 getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, TypeSize>> &Regs,
5724                      const SDValue &N) {
5725   switch (N.getOpcode()) {
5726   case ISD::CopyFromReg: {
5727     SDValue Op = N.getOperand(1);
5728     Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5729                       Op.getValueType().getSizeInBits());
5730     return;
5731   }
5732   case ISD::BITCAST:
5733   case ISD::AssertZext:
5734   case ISD::AssertSext:
5735   case ISD::TRUNCATE:
5736     getUnderlyingArgRegs(Regs, N.getOperand(0));
5737     return;
5738   case ISD::BUILD_PAIR:
5739   case ISD::BUILD_VECTOR:
5740   case ISD::CONCAT_VECTORS:
5741     for (SDValue Op : N->op_values())
5742       getUnderlyingArgRegs(Regs, Op);
5743     return;
5744   default:
5745     return;
5746   }
5747 }
5748 
5749 /// If the DbgValueInst is a dbg_value of a function argument, create the
5750 /// corresponding DBG_VALUE machine instruction for it now.  At the end of
5751 /// instruction selection, they will be inserted to the entry BB.
5752 /// We don't currently support this for variadic dbg_values, as they shouldn't
5753 /// appear for function arguments or in the prologue.
5754 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5755     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5756     DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
5757   const Argument *Arg = dyn_cast<Argument>(V);
5758   if (!Arg)
5759     return false;
5760 
5761   MachineFunction &MF = DAG.getMachineFunction();
5762   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5763 
5764   // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
5765   // we've been asked to pursue.
5766   auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
5767                               bool Indirect) {
5768     if (Reg.isVirtual() && MF.useDebugInstrRef()) {
5769       // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
5770       // pointing at the VReg, which will be patched up later.
5771       auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
5772       SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
5773           /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
5774           /* isKill */ false, /* isDead */ false,
5775           /* isUndef */ false, /* isEarlyClobber */ false,
5776           /* SubReg */ 0, /* isDebug */ true)});
5777 
5778       auto *NewDIExpr = FragExpr;
5779       // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
5780       // the DIExpression.
5781       if (Indirect)
5782         NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
5783       SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
5784       NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
5785       return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
5786     } else {
5787       // Create a completely standard DBG_VALUE.
5788       auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
5789       return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
5790     }
5791   };
5792 
5793   if (Kind == FuncArgumentDbgValueKind::Value) {
5794     // ArgDbgValues are hoisted to the beginning of the entry block. So we
5795     // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5796     // the entry block.
5797     bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5798     if (!IsInEntryBlock)
5799       return false;
5800 
5801     // ArgDbgValues are hoisted to the beginning of the entry block.  So we
5802     // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
5803     // variable that also is a param.
5804     //
5805     // Although, if we are at the top of the entry block already, we can still
5806     // emit using ArgDbgValue. This might catch some situations when the
5807     // dbg.value refers to an argument that isn't used in the entry block, so
5808     // any CopyToReg node would be optimized out and the only way to express
5809     // this DBG_VALUE is by using the physical reg (or FI) as done in this
5810     // method.  ArgDbgValues are hoisted to the beginning of the entry block. So
5811     // we should only emit as ArgDbgValue if the Variable is an argument to the
5812     // current function, and the dbg.value intrinsic is found in the entry
5813     // block.
5814     bool VariableIsFunctionInputArg = Variable->isParameter() &&
5815         !DL->getInlinedAt();
5816     bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5817     if (!IsInPrologue && !VariableIsFunctionInputArg)
5818       return false;
5819 
5820     // Here we assume that a function argument on IR level only can be used to
5821     // describe one input parameter on source level. If we for example have
5822     // source code like this
5823     //
5824     //    struct A { long x, y; };
5825     //    void foo(struct A a, long b) {
5826     //      ...
5827     //      b = a.x;
5828     //      ...
5829     //    }
5830     //
5831     // and IR like this
5832     //
5833     //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
5834     //  entry:
5835     //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5836     //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5837     //    call void @llvm.dbg.value(metadata i32 %b, "b",
5838     //    ...
5839     //    call void @llvm.dbg.value(metadata i32 %a1, "b"
5840     //    ...
5841     //
5842     // then the last dbg.value is describing a parameter "b" using a value that
5843     // is an argument. But since we already has used %a1 to describe a parameter
5844     // we should not handle that last dbg.value here (that would result in an
5845     // incorrect hoisting of the DBG_VALUE to the function entry).
5846     // Notice that we allow one dbg.value per IR level argument, to accommodate
5847     // for the situation with fragments above.
5848     if (VariableIsFunctionInputArg) {
5849       unsigned ArgNo = Arg->getArgNo();
5850       if (ArgNo >= FuncInfo.DescribedArgs.size())
5851         FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5852       else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5853         return false;
5854       FuncInfo.DescribedArgs.set(ArgNo);
5855     }
5856   }
5857 
5858   bool IsIndirect = false;
5859   std::optional<MachineOperand> Op;
5860   // Some arguments' frame index is recorded during argument lowering.
5861   int FI = FuncInfo.getArgumentFrameIndex(Arg);
5862   if (FI != std::numeric_limits<int>::max())
5863     Op = MachineOperand::CreateFI(FI);
5864 
5865   SmallVector<std::pair<unsigned, TypeSize>, 8> ArgRegsAndSizes;
5866   if (!Op && N.getNode()) {
5867     getUnderlyingArgRegs(ArgRegsAndSizes, N);
5868     Register Reg;
5869     if (ArgRegsAndSizes.size() == 1)
5870       Reg = ArgRegsAndSizes.front().first;
5871 
5872     if (Reg && Reg.isVirtual()) {
5873       MachineRegisterInfo &RegInfo = MF.getRegInfo();
5874       Register PR = RegInfo.getLiveInPhysReg(Reg);
5875       if (PR)
5876         Reg = PR;
5877     }
5878     if (Reg) {
5879       Op = MachineOperand::CreateReg(Reg, false);
5880       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5881     }
5882   }
5883 
5884   if (!Op && N.getNode()) {
5885     // Check if frame index is available.
5886     SDValue LCandidate = peekThroughBitcasts(N);
5887     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5888       if (FrameIndexSDNode *FINode =
5889           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5890         Op = MachineOperand::CreateFI(FINode->getIndex());
5891   }
5892 
5893   if (!Op) {
5894     // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
5895     auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<unsigned, TypeSize>>
5896                                          SplitRegs) {
5897       unsigned Offset = 0;
5898       for (const auto &RegAndSize : SplitRegs) {
5899         // If the expression is already a fragment, the current register
5900         // offset+size might extend beyond the fragment. In this case, only
5901         // the register bits that are inside the fragment are relevant.
5902         int RegFragmentSizeInBits = RegAndSize.second;
5903         if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5904           uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5905           // The register is entirely outside the expression fragment,
5906           // so is irrelevant for debug info.
5907           if (Offset >= ExprFragmentSizeInBits)
5908             break;
5909           // The register is partially outside the expression fragment, only
5910           // the low bits within the fragment are relevant for debug info.
5911           if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
5912             RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
5913           }
5914         }
5915 
5916         auto FragmentExpr = DIExpression::createFragmentExpression(
5917             Expr, Offset, RegFragmentSizeInBits);
5918         Offset += RegAndSize.second;
5919         // If a valid fragment expression cannot be created, the variable's
5920         // correct value cannot be determined and so it is set as Undef.
5921         if (!FragmentExpr) {
5922           SDDbgValue *SDV = DAG.getConstantDbgValue(
5923               Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
5924           DAG.AddDbgValue(SDV, false);
5925           continue;
5926         }
5927         MachineInstr *NewMI =
5928             MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
5929                              Kind != FuncArgumentDbgValueKind::Value);
5930         FuncInfo.ArgDbgValues.push_back(NewMI);
5931       }
5932     };
5933 
5934     // Check if ValueMap has reg number.
5935     DenseMap<const Value *, Register>::const_iterator
5936       VMI = FuncInfo.ValueMap.find(V);
5937     if (VMI != FuncInfo.ValueMap.end()) {
5938       const auto &TLI = DAG.getTargetLoweringInfo();
5939       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5940                        V->getType(), std::nullopt);
5941       if (RFV.occupiesMultipleRegs()) {
5942         splitMultiRegDbgValue(RFV.getRegsAndSizes());
5943         return true;
5944       }
5945 
5946       Op = MachineOperand::CreateReg(VMI->second, false);
5947       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5948     } else if (ArgRegsAndSizes.size() > 1) {
5949       // This was split due to the calling convention, and no virtual register
5950       // mapping exists for the value.
5951       splitMultiRegDbgValue(ArgRegsAndSizes);
5952       return true;
5953     }
5954   }
5955 
5956   if (!Op)
5957     return false;
5958 
5959   assert(Variable->isValidLocationForIntrinsic(DL) &&
5960          "Expected inlined-at fields to agree");
5961   MachineInstr *NewMI = nullptr;
5962 
5963   if (Op->isReg())
5964     NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
5965   else
5966     NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
5967                     Variable, Expr);
5968 
5969   // Otherwise, use ArgDbgValues.
5970   FuncInfo.ArgDbgValues.push_back(NewMI);
5971   return true;
5972 }
5973 
5974 /// Return the appropriate SDDbgValue based on N.
5975 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5976                                              DILocalVariable *Variable,
5977                                              DIExpression *Expr,
5978                                              const DebugLoc &dl,
5979                                              unsigned DbgSDNodeOrder) {
5980   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5981     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5982     // stack slot locations.
5983     //
5984     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5985     // debug values here after optimization:
5986     //
5987     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
5988     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5989     //
5990     // Both describe the direct values of their associated variables.
5991     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5992                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5993   }
5994   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5995                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5996 }
5997 
5998 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5999   switch (Intrinsic) {
6000   case Intrinsic::smul_fix:
6001     return ISD::SMULFIX;
6002   case Intrinsic::umul_fix:
6003     return ISD::UMULFIX;
6004   case Intrinsic::smul_fix_sat:
6005     return ISD::SMULFIXSAT;
6006   case Intrinsic::umul_fix_sat:
6007     return ISD::UMULFIXSAT;
6008   case Intrinsic::sdiv_fix:
6009     return ISD::SDIVFIX;
6010   case Intrinsic::udiv_fix:
6011     return ISD::UDIVFIX;
6012   case Intrinsic::sdiv_fix_sat:
6013     return ISD::SDIVFIXSAT;
6014   case Intrinsic::udiv_fix_sat:
6015     return ISD::UDIVFIXSAT;
6016   default:
6017     llvm_unreachable("Unhandled fixed point intrinsic");
6018   }
6019 }
6020 
6021 void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
6022                                            const char *FunctionName) {
6023   assert(FunctionName && "FunctionName must not be nullptr");
6024   SDValue Callee = DAG.getExternalSymbol(
6025       FunctionName,
6026       DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
6027   LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
6028 }
6029 
6030 /// Given a @llvm.call.preallocated.setup, return the corresponding
6031 /// preallocated call.
6032 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
6033   assert(cast<CallBase>(PreallocatedSetup)
6034                  ->getCalledFunction()
6035                  ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
6036          "expected call_preallocated_setup Value");
6037   for (const auto *U : PreallocatedSetup->users()) {
6038     auto *UseCall = cast<CallBase>(U);
6039     const Function *Fn = UseCall->getCalledFunction();
6040     if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6041       return UseCall;
6042     }
6043   }
6044   llvm_unreachable("expected corresponding call to preallocated setup/arg");
6045 }
6046 
6047 /// If DI is a debug value with an EntryValue expression, lower it using the
6048 /// corresponding physical register of the associated Argument value
6049 /// (guaranteed to exist by the verifier).
6050 bool SelectionDAGBuilder::visitEntryValueDbgValue(
6051     ArrayRef<const Value *> Values, DILocalVariable *Variable,
6052     DIExpression *Expr, DebugLoc DbgLoc) {
6053   if (!Expr->isEntryValue() || !hasSingleElement(Values))
6054     return false;
6055 
6056   // These properties are guaranteed by the verifier.
6057   const Argument *Arg = cast<Argument>(Values[0]);
6058   assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
6059 
6060   auto ArgIt = FuncInfo.ValueMap.find(Arg);
6061   if (ArgIt == FuncInfo.ValueMap.end()) {
6062     LLVM_DEBUG(
6063         dbgs() << "Dropping dbg.value: expression is entry_value but "
6064                   "couldn't find an associated register for the Argument\n");
6065     return true;
6066   }
6067   Register ArgVReg = ArgIt->getSecond();
6068 
6069   for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
6070     if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6071       SDDbgValue *SDV = DAG.getVRegDbgValue(
6072           Variable, Expr, PhysReg, false /*IsIndidrect*/, DbgLoc, SDNodeOrder);
6073       DAG.AddDbgValue(SDV, false /*treat as dbg.declare byval parameter*/);
6074       return true;
6075     }
6076   LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
6077                        "couldn't find a physical register\n");
6078   return true;
6079 }
6080 
6081 /// Lower the call to the specified intrinsic function.
6082 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
6083                                              unsigned Intrinsic) {
6084   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6085   SDLoc sdl = getCurSDLoc();
6086   DebugLoc dl = getCurDebugLoc();
6087   SDValue Res;
6088 
6089   SDNodeFlags Flags;
6090   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
6091     Flags.copyFMF(*FPOp);
6092 
6093   switch (Intrinsic) {
6094   default:
6095     // By default, turn this into a target intrinsic node.
6096     visitTargetIntrinsic(I, Intrinsic);
6097     return;
6098   case Intrinsic::vscale: {
6099     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6100     setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
6101     return;
6102   }
6103   case Intrinsic::vastart:  visitVAStart(I); return;
6104   case Intrinsic::vaend:    visitVAEnd(I); return;
6105   case Intrinsic::vacopy:   visitVACopy(I); return;
6106   case Intrinsic::returnaddress:
6107     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
6108                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6109                              getValue(I.getArgOperand(0))));
6110     return;
6111   case Intrinsic::addressofreturnaddress:
6112     setValue(&I,
6113              DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
6114                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6115     return;
6116   case Intrinsic::sponentry:
6117     setValue(&I,
6118              DAG.getNode(ISD::SPONENTRY, sdl,
6119                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6120     return;
6121   case Intrinsic::frameaddress:
6122     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
6123                              TLI.getFrameIndexTy(DAG.getDataLayout()),
6124                              getValue(I.getArgOperand(0))));
6125     return;
6126   case Intrinsic::read_volatile_register:
6127   case Intrinsic::read_register: {
6128     Value *Reg = I.getArgOperand(0);
6129     SDValue Chain = getRoot();
6130     SDValue RegName =
6131         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6132     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6133     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
6134       DAG.getVTList(VT, MVT::Other), Chain, RegName);
6135     setValue(&I, Res);
6136     DAG.setRoot(Res.getValue(1));
6137     return;
6138   }
6139   case Intrinsic::write_register: {
6140     Value *Reg = I.getArgOperand(0);
6141     Value *RegValue = I.getArgOperand(1);
6142     SDValue Chain = getRoot();
6143     SDValue RegName =
6144         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6145     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
6146                             RegName, getValue(RegValue)));
6147     return;
6148   }
6149   case Intrinsic::memcpy: {
6150     const auto &MCI = cast<MemCpyInst>(I);
6151     SDValue Op1 = getValue(I.getArgOperand(0));
6152     SDValue Op2 = getValue(I.getArgOperand(1));
6153     SDValue Op3 = getValue(I.getArgOperand(2));
6154     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
6155     Align DstAlign = MCI.getDestAlign().valueOrOne();
6156     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6157     Align Alignment = std::min(DstAlign, SrcAlign);
6158     bool isVol = MCI.isVolatile();
6159     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6160     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6161     // node.
6162     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6163     SDValue MC = DAG.getMemcpy(
6164         Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6165         /* AlwaysInline */ false, isTC, MachinePointerInfo(I.getArgOperand(0)),
6166         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6167     updateDAGForMaybeTailCall(MC);
6168     return;
6169   }
6170   case Intrinsic::memcpy_inline: {
6171     const auto &MCI = cast<MemCpyInlineInst>(I);
6172     SDValue Dst = getValue(I.getArgOperand(0));
6173     SDValue Src = getValue(I.getArgOperand(1));
6174     SDValue Size = getValue(I.getArgOperand(2));
6175     assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
6176     // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
6177     Align DstAlign = MCI.getDestAlign().valueOrOne();
6178     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6179     Align Alignment = std::min(DstAlign, SrcAlign);
6180     bool isVol = MCI.isVolatile();
6181     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6182     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6183     // node.
6184     SDValue MC = DAG.getMemcpy(
6185         getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
6186         /* AlwaysInline */ true, isTC, MachinePointerInfo(I.getArgOperand(0)),
6187         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6188     updateDAGForMaybeTailCall(MC);
6189     return;
6190   }
6191   case Intrinsic::memset: {
6192     const auto &MSI = cast<MemSetInst>(I);
6193     SDValue Op1 = getValue(I.getArgOperand(0));
6194     SDValue Op2 = getValue(I.getArgOperand(1));
6195     SDValue Op3 = getValue(I.getArgOperand(2));
6196     // @llvm.memset defines 0 and 1 to both mean no alignment.
6197     Align Alignment = MSI.getDestAlign().valueOrOne();
6198     bool isVol = MSI.isVolatile();
6199     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6200     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6201     SDValue MS = DAG.getMemset(
6202         Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
6203         isTC, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
6204     updateDAGForMaybeTailCall(MS);
6205     return;
6206   }
6207   case Intrinsic::memset_inline: {
6208     const auto &MSII = cast<MemSetInlineInst>(I);
6209     SDValue Dst = getValue(I.getArgOperand(0));
6210     SDValue Value = getValue(I.getArgOperand(1));
6211     SDValue Size = getValue(I.getArgOperand(2));
6212     assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
6213     // @llvm.memset defines 0 and 1 to both mean no alignment.
6214     Align DstAlign = MSII.getDestAlign().valueOrOne();
6215     bool isVol = MSII.isVolatile();
6216     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6217     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6218     SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
6219                                /* AlwaysInline */ true, isTC,
6220                                MachinePointerInfo(I.getArgOperand(0)),
6221                                I.getAAMetadata());
6222     updateDAGForMaybeTailCall(MC);
6223     return;
6224   }
6225   case Intrinsic::memmove: {
6226     const auto &MMI = cast<MemMoveInst>(I);
6227     SDValue Op1 = getValue(I.getArgOperand(0));
6228     SDValue Op2 = getValue(I.getArgOperand(1));
6229     SDValue Op3 = getValue(I.getArgOperand(2));
6230     // @llvm.memmove defines 0 and 1 to both mean no alignment.
6231     Align DstAlign = MMI.getDestAlign().valueOrOne();
6232     Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6233     Align Alignment = std::min(DstAlign, SrcAlign);
6234     bool isVol = MMI.isVolatile();
6235     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6236     // FIXME: Support passing different dest/src alignments to the memmove DAG
6237     // node.
6238     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6239     SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6240                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
6241                                 MachinePointerInfo(I.getArgOperand(1)),
6242                                 I.getAAMetadata(), AA);
6243     updateDAGForMaybeTailCall(MM);
6244     return;
6245   }
6246   case Intrinsic::memcpy_element_unordered_atomic: {
6247     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
6248     SDValue Dst = getValue(MI.getRawDest());
6249     SDValue Src = getValue(MI.getRawSource());
6250     SDValue Length = getValue(MI.getLength());
6251 
6252     Type *LengthTy = MI.getLength()->getType();
6253     unsigned ElemSz = MI.getElementSizeInBytes();
6254     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6255     SDValue MC =
6256         DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6257                             isTC, MachinePointerInfo(MI.getRawDest()),
6258                             MachinePointerInfo(MI.getRawSource()));
6259     updateDAGForMaybeTailCall(MC);
6260     return;
6261   }
6262   case Intrinsic::memmove_element_unordered_atomic: {
6263     auto &MI = cast<AtomicMemMoveInst>(I);
6264     SDValue Dst = getValue(MI.getRawDest());
6265     SDValue Src = getValue(MI.getRawSource());
6266     SDValue Length = getValue(MI.getLength());
6267 
6268     Type *LengthTy = MI.getLength()->getType();
6269     unsigned ElemSz = MI.getElementSizeInBytes();
6270     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6271     SDValue MC =
6272         DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6273                              isTC, MachinePointerInfo(MI.getRawDest()),
6274                              MachinePointerInfo(MI.getRawSource()));
6275     updateDAGForMaybeTailCall(MC);
6276     return;
6277   }
6278   case Intrinsic::memset_element_unordered_atomic: {
6279     auto &MI = cast<AtomicMemSetInst>(I);
6280     SDValue Dst = getValue(MI.getRawDest());
6281     SDValue Val = getValue(MI.getValue());
6282     SDValue Length = getValue(MI.getLength());
6283 
6284     Type *LengthTy = MI.getLength()->getType();
6285     unsigned ElemSz = MI.getElementSizeInBytes();
6286     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6287     SDValue MC =
6288         DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
6289                             isTC, MachinePointerInfo(MI.getRawDest()));
6290     updateDAGForMaybeTailCall(MC);
6291     return;
6292   }
6293   case Intrinsic::call_preallocated_setup: {
6294     const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6295     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6296     SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6297                               getRoot(), SrcValue);
6298     setValue(&I, Res);
6299     DAG.setRoot(Res);
6300     return;
6301   }
6302   case Intrinsic::call_preallocated_arg: {
6303     const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6304     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6305     SDValue Ops[3];
6306     Ops[0] = getRoot();
6307     Ops[1] = SrcValue;
6308     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6309                                    MVT::i32); // arg index
6310     SDValue Res = DAG.getNode(
6311         ISD::PREALLOCATED_ARG, sdl,
6312         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6313     setValue(&I, Res);
6314     DAG.setRoot(Res.getValue(1));
6315     return;
6316   }
6317   case Intrinsic::dbg_declare: {
6318     const auto &DI = cast<DbgDeclareInst>(I);
6319     // Debug intrinsics are handled separately in assignment tracking mode.
6320     // Some intrinsics are handled right after Argument lowering.
6321     if (AssignmentTrackingEnabled ||
6322         FuncInfo.PreprocessedDbgDeclares.count(&DI))
6323       return;
6324     LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DI << "\n");
6325     DILocalVariable *Variable = DI.getVariable();
6326     DIExpression *Expression = DI.getExpression();
6327     dropDanglingDebugInfo(Variable, Expression);
6328     // Assume dbg.declare can not currently use DIArgList, i.e.
6329     // it is non-variadic.
6330     assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
6331     handleDebugDeclare(DI.getVariableLocationOp(0), Variable, Expression,
6332                        DI.getDebugLoc());
6333     return;
6334   }
6335   case Intrinsic::dbg_label: {
6336     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
6337     DILabel *Label = DI.getLabel();
6338     assert(Label && "Missing label");
6339 
6340     SDDbgLabel *SDV;
6341     SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
6342     DAG.AddDbgLabel(SDV);
6343     return;
6344   }
6345   case Intrinsic::dbg_assign: {
6346     // Debug intrinsics are handled seperately in assignment tracking mode.
6347     if (AssignmentTrackingEnabled)
6348       return;
6349     // If assignment tracking hasn't been enabled then fall through and treat
6350     // the dbg.assign as a dbg.value.
6351     [[fallthrough]];
6352   }
6353   case Intrinsic::dbg_value: {
6354     // Debug intrinsics are handled seperately in assignment tracking mode.
6355     if (AssignmentTrackingEnabled)
6356       return;
6357     const DbgValueInst &DI = cast<DbgValueInst>(I);
6358     assert(DI.getVariable() && "Missing variable");
6359 
6360     DILocalVariable *Variable = DI.getVariable();
6361     DIExpression *Expression = DI.getExpression();
6362     dropDanglingDebugInfo(Variable, Expression);
6363 
6364     if (DI.isKillLocation()) {
6365       handleKillDebugValue(Variable, Expression, DI.getDebugLoc(), SDNodeOrder);
6366       return;
6367     }
6368 
6369     SmallVector<Value *, 4> Values(DI.getValues());
6370     if (Values.empty())
6371       return;
6372 
6373     bool IsVariadic = DI.hasArgList();
6374     if (!handleDebugValue(Values, Variable, Expression, DI.getDebugLoc(),
6375                           SDNodeOrder, IsVariadic))
6376       addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
6377                            DI.getDebugLoc(), SDNodeOrder);
6378     return;
6379   }
6380 
6381   case Intrinsic::eh_typeid_for: {
6382     // Find the type id for the given typeinfo.
6383     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6384     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6385     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6386     setValue(&I, Res);
6387     return;
6388   }
6389 
6390   case Intrinsic::eh_return_i32:
6391   case Intrinsic::eh_return_i64:
6392     DAG.getMachineFunction().setCallsEHReturn(true);
6393     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6394                             MVT::Other,
6395                             getControlRoot(),
6396                             getValue(I.getArgOperand(0)),
6397                             getValue(I.getArgOperand(1))));
6398     return;
6399   case Intrinsic::eh_unwind_init:
6400     DAG.getMachineFunction().setCallsUnwindInit(true);
6401     return;
6402   case Intrinsic::eh_dwarf_cfa:
6403     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6404                              TLI.getPointerTy(DAG.getDataLayout()),
6405                              getValue(I.getArgOperand(0))));
6406     return;
6407   case Intrinsic::eh_sjlj_callsite: {
6408     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6409     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
6410     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
6411 
6412     MMI.setCurrentCallSite(CI->getZExtValue());
6413     return;
6414   }
6415   case Intrinsic::eh_sjlj_functioncontext: {
6416     // Get and store the index of the function context.
6417     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6418     AllocaInst *FnCtx =
6419       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6420     int FI = FuncInfo.StaticAllocaMap[FnCtx];
6421     MFI.setFunctionContextIndex(FI);
6422     return;
6423   }
6424   case Intrinsic::eh_sjlj_setjmp: {
6425     SDValue Ops[2];
6426     Ops[0] = getRoot();
6427     Ops[1] = getValue(I.getArgOperand(0));
6428     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6429                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
6430     setValue(&I, Op.getValue(0));
6431     DAG.setRoot(Op.getValue(1));
6432     return;
6433   }
6434   case Intrinsic::eh_sjlj_longjmp:
6435     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6436                             getRoot(), getValue(I.getArgOperand(0))));
6437     return;
6438   case Intrinsic::eh_sjlj_setup_dispatch:
6439     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6440                             getRoot()));
6441     return;
6442   case Intrinsic::masked_gather:
6443     visitMaskedGather(I);
6444     return;
6445   case Intrinsic::masked_load:
6446     visitMaskedLoad(I);
6447     return;
6448   case Intrinsic::masked_scatter:
6449     visitMaskedScatter(I);
6450     return;
6451   case Intrinsic::masked_store:
6452     visitMaskedStore(I);
6453     return;
6454   case Intrinsic::masked_expandload:
6455     visitMaskedLoad(I, true /* IsExpanding */);
6456     return;
6457   case Intrinsic::masked_compressstore:
6458     visitMaskedStore(I, true /* IsCompressing */);
6459     return;
6460   case Intrinsic::powi:
6461     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6462                             getValue(I.getArgOperand(1)), DAG));
6463     return;
6464   case Intrinsic::log:
6465     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6466     return;
6467   case Intrinsic::log2:
6468     setValue(&I,
6469              expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6470     return;
6471   case Intrinsic::log10:
6472     setValue(&I,
6473              expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6474     return;
6475   case Intrinsic::exp:
6476     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6477     return;
6478   case Intrinsic::exp2:
6479     setValue(&I,
6480              expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6481     return;
6482   case Intrinsic::pow:
6483     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6484                            getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6485     return;
6486   case Intrinsic::sqrt:
6487   case Intrinsic::fabs:
6488   case Intrinsic::sin:
6489   case Intrinsic::cos:
6490   case Intrinsic::exp10:
6491   case Intrinsic::floor:
6492   case Intrinsic::ceil:
6493   case Intrinsic::trunc:
6494   case Intrinsic::rint:
6495   case Intrinsic::nearbyint:
6496   case Intrinsic::round:
6497   case Intrinsic::roundeven:
6498   case Intrinsic::canonicalize: {
6499     unsigned Opcode;
6500     switch (Intrinsic) {
6501     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6502     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
6503     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
6504     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
6505     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
6506     case Intrinsic::exp10:     Opcode = ISD::FEXP10;     break;
6507     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
6508     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
6509     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
6510     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
6511     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6512     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
6513     case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6514     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6515     }
6516 
6517     setValue(&I, DAG.getNode(Opcode, sdl,
6518                              getValue(I.getArgOperand(0)).getValueType(),
6519                              getValue(I.getArgOperand(0)), Flags));
6520     return;
6521   }
6522   case Intrinsic::lround:
6523   case Intrinsic::llround:
6524   case Intrinsic::lrint:
6525   case Intrinsic::llrint: {
6526     unsigned Opcode;
6527     switch (Intrinsic) {
6528     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6529     case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
6530     case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6531     case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
6532     case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
6533     }
6534 
6535     EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6536     setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6537                              getValue(I.getArgOperand(0))));
6538     return;
6539   }
6540   case Intrinsic::minnum:
6541     setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6542                              getValue(I.getArgOperand(0)).getValueType(),
6543                              getValue(I.getArgOperand(0)),
6544                              getValue(I.getArgOperand(1)), Flags));
6545     return;
6546   case Intrinsic::maxnum:
6547     setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6548                              getValue(I.getArgOperand(0)).getValueType(),
6549                              getValue(I.getArgOperand(0)),
6550                              getValue(I.getArgOperand(1)), Flags));
6551     return;
6552   case Intrinsic::minimum:
6553     setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6554                              getValue(I.getArgOperand(0)).getValueType(),
6555                              getValue(I.getArgOperand(0)),
6556                              getValue(I.getArgOperand(1)), Flags));
6557     return;
6558   case Intrinsic::maximum:
6559     setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6560                              getValue(I.getArgOperand(0)).getValueType(),
6561                              getValue(I.getArgOperand(0)),
6562                              getValue(I.getArgOperand(1)), Flags));
6563     return;
6564   case Intrinsic::copysign:
6565     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6566                              getValue(I.getArgOperand(0)).getValueType(),
6567                              getValue(I.getArgOperand(0)),
6568                              getValue(I.getArgOperand(1)), Flags));
6569     return;
6570   case Intrinsic::ldexp:
6571     setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
6572                              getValue(I.getArgOperand(0)).getValueType(),
6573                              getValue(I.getArgOperand(0)),
6574                              getValue(I.getArgOperand(1)), Flags));
6575     return;
6576   case Intrinsic::frexp: {
6577     SmallVector<EVT, 2> ValueVTs;
6578     ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
6579     SDVTList VTs = DAG.getVTList(ValueVTs);
6580     setValue(&I,
6581              DAG.getNode(ISD::FFREXP, sdl, VTs, getValue(I.getArgOperand(0))));
6582     return;
6583   }
6584   case Intrinsic::arithmetic_fence: {
6585     setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
6586                              getValue(I.getArgOperand(0)).getValueType(),
6587                              getValue(I.getArgOperand(0)), Flags));
6588     return;
6589   }
6590   case Intrinsic::fma:
6591     setValue(&I, DAG.getNode(
6592                      ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
6593                      getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
6594                      getValue(I.getArgOperand(2)), Flags));
6595     return;
6596 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
6597   case Intrinsic::INTRINSIC:
6598 #include "llvm/IR/ConstrainedOps.def"
6599     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6600     return;
6601 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6602 #include "llvm/IR/VPIntrinsics.def"
6603     visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
6604     return;
6605   case Intrinsic::fptrunc_round: {
6606     // Get the last argument, the metadata and convert it to an integer in the
6607     // call
6608     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
6609     std::optional<RoundingMode> RoundMode =
6610         convertStrToRoundingMode(cast<MDString>(MD)->getString());
6611 
6612     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6613 
6614     // Propagate fast-math-flags from IR to node(s).
6615     SDNodeFlags Flags;
6616     Flags.copyFMF(*cast<FPMathOperator>(&I));
6617     SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
6618 
6619     SDValue Result;
6620     Result = DAG.getNode(
6621         ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
6622         DAG.getTargetConstant((int)*RoundMode, sdl,
6623                               TLI.getPointerTy(DAG.getDataLayout())));
6624     setValue(&I, Result);
6625 
6626     return;
6627   }
6628   case Intrinsic::fmuladd: {
6629     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6630     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6631         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6632       setValue(&I, DAG.getNode(ISD::FMA, sdl,
6633                                getValue(I.getArgOperand(0)).getValueType(),
6634                                getValue(I.getArgOperand(0)),
6635                                getValue(I.getArgOperand(1)),
6636                                getValue(I.getArgOperand(2)), Flags));
6637     } else {
6638       // TODO: Intrinsic calls should have fast-math-flags.
6639       SDValue Mul = DAG.getNode(
6640           ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
6641           getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
6642       SDValue Add = DAG.getNode(ISD::FADD, sdl,
6643                                 getValue(I.getArgOperand(0)).getValueType(),
6644                                 Mul, getValue(I.getArgOperand(2)), Flags);
6645       setValue(&I, Add);
6646     }
6647     return;
6648   }
6649   case Intrinsic::convert_to_fp16:
6650     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6651                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6652                                          getValue(I.getArgOperand(0)),
6653                                          DAG.getTargetConstant(0, sdl,
6654                                                                MVT::i32))));
6655     return;
6656   case Intrinsic::convert_from_fp16:
6657     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6658                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6659                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
6660                                          getValue(I.getArgOperand(0)))));
6661     return;
6662   case Intrinsic::fptosi_sat: {
6663     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6664     setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
6665                              getValue(I.getArgOperand(0)),
6666                              DAG.getValueType(VT.getScalarType())));
6667     return;
6668   }
6669   case Intrinsic::fptoui_sat: {
6670     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6671     setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
6672                              getValue(I.getArgOperand(0)),
6673                              DAG.getValueType(VT.getScalarType())));
6674     return;
6675   }
6676   case Intrinsic::set_rounding:
6677     Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
6678                       {getRoot(), getValue(I.getArgOperand(0))});
6679     setValue(&I, Res);
6680     DAG.setRoot(Res.getValue(0));
6681     return;
6682   case Intrinsic::is_fpclass: {
6683     const DataLayout DLayout = DAG.getDataLayout();
6684     EVT DestVT = TLI.getValueType(DLayout, I.getType());
6685     EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
6686     FPClassTest Test = static_cast<FPClassTest>(
6687         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
6688     MachineFunction &MF = DAG.getMachineFunction();
6689     const Function &F = MF.getFunction();
6690     SDValue Op = getValue(I.getArgOperand(0));
6691     SDNodeFlags Flags;
6692     Flags.setNoFPExcept(
6693         !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
6694     // If ISD::IS_FPCLASS should be expanded, do it right now, because the
6695     // expansion can use illegal types. Making expansion early allows
6696     // legalizing these types prior to selection.
6697     if (!TLI.isOperationLegalOrCustom(ISD::IS_FPCLASS, ArgVT)) {
6698       SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
6699       setValue(&I, Result);
6700       return;
6701     }
6702 
6703     SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
6704     SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
6705     setValue(&I, V);
6706     return;
6707   }
6708   case Intrinsic::get_fpenv: {
6709     const DataLayout DLayout = DAG.getDataLayout();
6710     EVT EnvVT = TLI.getValueType(DLayout, I.getType());
6711     Align TempAlign = DAG.getEVTAlign(EnvVT);
6712     SDValue Chain = getRoot();
6713     // Use GET_FPENV if it is legal or custom. Otherwise use memory-based node
6714     // and temporary storage in stack.
6715     if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
6716       Res = DAG.getNode(
6717           ISD::GET_FPENV, sdl,
6718           DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6719                         MVT::Other),
6720           Chain);
6721     } else {
6722       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6723       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6724       auto MPI =
6725           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6726       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6727           MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize,
6728           TempAlign);
6729       Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6730       Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
6731     }
6732     setValue(&I, Res);
6733     DAG.setRoot(Res.getValue(1));
6734     return;
6735   }
6736   case Intrinsic::set_fpenv: {
6737     const DataLayout DLayout = DAG.getDataLayout();
6738     SDValue Env = getValue(I.getArgOperand(0));
6739     EVT EnvVT = Env.getValueType();
6740     Align TempAlign = DAG.getEVTAlign(EnvVT);
6741     SDValue Chain = getRoot();
6742     // If SET_FPENV is custom or legal, use it. Otherwise use loading
6743     // environment from memory.
6744     if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
6745       Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
6746     } else {
6747       // Allocate space in stack, copy environment bits into it and use this
6748       // memory in SET_FPENV_MEM.
6749       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6750       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6751       auto MPI =
6752           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6753       Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
6754                            MachineMemOperand::MOStore);
6755       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6756           MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize,
6757           TempAlign);
6758       Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6759     }
6760     DAG.setRoot(Chain);
6761     return;
6762   }
6763   case Intrinsic::reset_fpenv:
6764     DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
6765     return;
6766   case Intrinsic::get_fpmode:
6767     Res = DAG.getNode(
6768         ISD::GET_FPMODE, sdl,
6769         DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6770                       MVT::Other),
6771         DAG.getRoot());
6772     setValue(&I, Res);
6773     DAG.setRoot(Res.getValue(1));
6774     return;
6775   case Intrinsic::set_fpmode:
6776     Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
6777                       getValue(I.getArgOperand(0)));
6778     DAG.setRoot(Res);
6779     return;
6780   case Intrinsic::reset_fpmode: {
6781     Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
6782     DAG.setRoot(Res);
6783     return;
6784   }
6785   case Intrinsic::pcmarker: {
6786     SDValue Tmp = getValue(I.getArgOperand(0));
6787     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
6788     return;
6789   }
6790   case Intrinsic::readcyclecounter: {
6791     SDValue Op = getRoot();
6792     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6793                       DAG.getVTList(MVT::i64, MVT::Other), Op);
6794     setValue(&I, Res);
6795     DAG.setRoot(Res.getValue(1));
6796     return;
6797   }
6798   case Intrinsic::readsteadycounter: {
6799     SDValue Op = getRoot();
6800     Res = DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
6801                       DAG.getVTList(MVT::i64, MVT::Other), Op);
6802     setValue(&I, Res);
6803     DAG.setRoot(Res.getValue(1));
6804     return;
6805   }
6806   case Intrinsic::bitreverse:
6807     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6808                              getValue(I.getArgOperand(0)).getValueType(),
6809                              getValue(I.getArgOperand(0))));
6810     return;
6811   case Intrinsic::bswap:
6812     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6813                              getValue(I.getArgOperand(0)).getValueType(),
6814                              getValue(I.getArgOperand(0))));
6815     return;
6816   case Intrinsic::cttz: {
6817     SDValue Arg = getValue(I.getArgOperand(0));
6818     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6819     EVT Ty = Arg.getValueType();
6820     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6821                              sdl, Ty, Arg));
6822     return;
6823   }
6824   case Intrinsic::ctlz: {
6825     SDValue Arg = getValue(I.getArgOperand(0));
6826     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6827     EVT Ty = Arg.getValueType();
6828     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6829                              sdl, Ty, Arg));
6830     return;
6831   }
6832   case Intrinsic::ctpop: {
6833     SDValue Arg = getValue(I.getArgOperand(0));
6834     EVT Ty = Arg.getValueType();
6835     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6836     return;
6837   }
6838   case Intrinsic::fshl:
6839   case Intrinsic::fshr: {
6840     bool IsFSHL = Intrinsic == Intrinsic::fshl;
6841     SDValue X = getValue(I.getArgOperand(0));
6842     SDValue Y = getValue(I.getArgOperand(1));
6843     SDValue Z = getValue(I.getArgOperand(2));
6844     EVT VT = X.getValueType();
6845 
6846     if (X == Y) {
6847       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6848       setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6849     } else {
6850       auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6851       setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6852     }
6853     return;
6854   }
6855   case Intrinsic::sadd_sat: {
6856     SDValue Op1 = getValue(I.getArgOperand(0));
6857     SDValue Op2 = getValue(I.getArgOperand(1));
6858     setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6859     return;
6860   }
6861   case Intrinsic::uadd_sat: {
6862     SDValue Op1 = getValue(I.getArgOperand(0));
6863     SDValue Op2 = getValue(I.getArgOperand(1));
6864     setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6865     return;
6866   }
6867   case Intrinsic::ssub_sat: {
6868     SDValue Op1 = getValue(I.getArgOperand(0));
6869     SDValue Op2 = getValue(I.getArgOperand(1));
6870     setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6871     return;
6872   }
6873   case Intrinsic::usub_sat: {
6874     SDValue Op1 = getValue(I.getArgOperand(0));
6875     SDValue Op2 = getValue(I.getArgOperand(1));
6876     setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6877     return;
6878   }
6879   case Intrinsic::sshl_sat: {
6880     SDValue Op1 = getValue(I.getArgOperand(0));
6881     SDValue Op2 = getValue(I.getArgOperand(1));
6882     setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6883     return;
6884   }
6885   case Intrinsic::ushl_sat: {
6886     SDValue Op1 = getValue(I.getArgOperand(0));
6887     SDValue Op2 = getValue(I.getArgOperand(1));
6888     setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6889     return;
6890   }
6891   case Intrinsic::smul_fix:
6892   case Intrinsic::umul_fix:
6893   case Intrinsic::smul_fix_sat:
6894   case Intrinsic::umul_fix_sat: {
6895     SDValue Op1 = getValue(I.getArgOperand(0));
6896     SDValue Op2 = getValue(I.getArgOperand(1));
6897     SDValue Op3 = getValue(I.getArgOperand(2));
6898     setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6899                              Op1.getValueType(), Op1, Op2, Op3));
6900     return;
6901   }
6902   case Intrinsic::sdiv_fix:
6903   case Intrinsic::udiv_fix:
6904   case Intrinsic::sdiv_fix_sat:
6905   case Intrinsic::udiv_fix_sat: {
6906     SDValue Op1 = getValue(I.getArgOperand(0));
6907     SDValue Op2 = getValue(I.getArgOperand(1));
6908     SDValue Op3 = getValue(I.getArgOperand(2));
6909     setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6910                               Op1, Op2, Op3, DAG, TLI));
6911     return;
6912   }
6913   case Intrinsic::smax: {
6914     SDValue Op1 = getValue(I.getArgOperand(0));
6915     SDValue Op2 = getValue(I.getArgOperand(1));
6916     setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
6917     return;
6918   }
6919   case Intrinsic::smin: {
6920     SDValue Op1 = getValue(I.getArgOperand(0));
6921     SDValue Op2 = getValue(I.getArgOperand(1));
6922     setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
6923     return;
6924   }
6925   case Intrinsic::umax: {
6926     SDValue Op1 = getValue(I.getArgOperand(0));
6927     SDValue Op2 = getValue(I.getArgOperand(1));
6928     setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
6929     return;
6930   }
6931   case Intrinsic::umin: {
6932     SDValue Op1 = getValue(I.getArgOperand(0));
6933     SDValue Op2 = getValue(I.getArgOperand(1));
6934     setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
6935     return;
6936   }
6937   case Intrinsic::abs: {
6938     // TODO: Preserve "int min is poison" arg in SDAG?
6939     SDValue Op1 = getValue(I.getArgOperand(0));
6940     setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
6941     return;
6942   }
6943   case Intrinsic::stacksave: {
6944     SDValue Op = getRoot();
6945     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6946     Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
6947     setValue(&I, Res);
6948     DAG.setRoot(Res.getValue(1));
6949     return;
6950   }
6951   case Intrinsic::stackrestore:
6952     Res = getValue(I.getArgOperand(0));
6953     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6954     return;
6955   case Intrinsic::get_dynamic_area_offset: {
6956     SDValue Op = getRoot();
6957     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6958     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6959     // Result type for @llvm.get.dynamic.area.offset should match PtrTy for
6960     // target.
6961     if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
6962       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6963                          " intrinsic!");
6964     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6965                       Op);
6966     DAG.setRoot(Op);
6967     setValue(&I, Res);
6968     return;
6969   }
6970   case Intrinsic::stackguard: {
6971     MachineFunction &MF = DAG.getMachineFunction();
6972     const Module &M = *MF.getFunction().getParent();
6973     EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6974     SDValue Chain = getRoot();
6975     if (TLI.useLoadStackGuardNode()) {
6976       Res = getLoadStackGuard(DAG, sdl, Chain);
6977       Res = DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
6978     } else {
6979       const Value *Global = TLI.getSDagStackGuard(M);
6980       Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
6981       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6982                         MachinePointerInfo(Global, 0), Align,
6983                         MachineMemOperand::MOVolatile);
6984     }
6985     if (TLI.useStackGuardXorFP())
6986       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6987     DAG.setRoot(Chain);
6988     setValue(&I, Res);
6989     return;
6990   }
6991   case Intrinsic::stackprotector: {
6992     // Emit code into the DAG to store the stack guard onto the stack.
6993     MachineFunction &MF = DAG.getMachineFunction();
6994     MachineFrameInfo &MFI = MF.getFrameInfo();
6995     SDValue Src, Chain = getRoot();
6996 
6997     if (TLI.useLoadStackGuardNode())
6998       Src = getLoadStackGuard(DAG, sdl, Chain);
6999     else
7000       Src = getValue(I.getArgOperand(0));   // The guard's value.
7001 
7002     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
7003 
7004     int FI = FuncInfo.StaticAllocaMap[Slot];
7005     MFI.setStackProtectorIndex(FI);
7006     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
7007 
7008     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
7009 
7010     // Store the stack protector onto the stack.
7011     Res = DAG.getStore(
7012         Chain, sdl, Src, FIN,
7013         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
7014         MaybeAlign(), MachineMemOperand::MOVolatile);
7015     setValue(&I, Res);
7016     DAG.setRoot(Res);
7017     return;
7018   }
7019   case Intrinsic::objectsize:
7020     llvm_unreachable("llvm.objectsize.* should have been lowered already");
7021 
7022   case Intrinsic::is_constant:
7023     llvm_unreachable("llvm.is.constant.* should have been lowered already");
7024 
7025   case Intrinsic::annotation:
7026   case Intrinsic::ptr_annotation:
7027   case Intrinsic::launder_invariant_group:
7028   case Intrinsic::strip_invariant_group:
7029     // Drop the intrinsic, but forward the value
7030     setValue(&I, getValue(I.getOperand(0)));
7031     return;
7032 
7033   case Intrinsic::assume:
7034   case Intrinsic::experimental_noalias_scope_decl:
7035   case Intrinsic::var_annotation:
7036   case Intrinsic::sideeffect:
7037     // Discard annotate attributes, noalias scope declarations, assumptions, and
7038     // artificial side-effects.
7039     return;
7040 
7041   case Intrinsic::codeview_annotation: {
7042     // Emit a label associated with this metadata.
7043     MachineFunction &MF = DAG.getMachineFunction();
7044     MCSymbol *Label =
7045         MF.getMMI().getContext().createTempSymbol("annotation", true);
7046     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
7047     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
7048     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
7049     DAG.setRoot(Res);
7050     return;
7051   }
7052 
7053   case Intrinsic::init_trampoline: {
7054     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
7055 
7056     SDValue Ops[6];
7057     Ops[0] = getRoot();
7058     Ops[1] = getValue(I.getArgOperand(0));
7059     Ops[2] = getValue(I.getArgOperand(1));
7060     Ops[3] = getValue(I.getArgOperand(2));
7061     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
7062     Ops[5] = DAG.getSrcValue(F);
7063 
7064     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
7065 
7066     DAG.setRoot(Res);
7067     return;
7068   }
7069   case Intrinsic::adjust_trampoline:
7070     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
7071                              TLI.getPointerTy(DAG.getDataLayout()),
7072                              getValue(I.getArgOperand(0))));
7073     return;
7074   case Intrinsic::gcroot: {
7075     assert(DAG.getMachineFunction().getFunction().hasGC() &&
7076            "only valid in functions with gc specified, enforced by Verifier");
7077     assert(GFI && "implied by previous");
7078     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
7079     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
7080 
7081     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
7082     GFI->addStackRoot(FI->getIndex(), TypeMap);
7083     return;
7084   }
7085   case Intrinsic::gcread:
7086   case Intrinsic::gcwrite:
7087     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
7088   case Intrinsic::get_rounding:
7089     Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
7090     setValue(&I, Res);
7091     DAG.setRoot(Res.getValue(1));
7092     return;
7093 
7094   case Intrinsic::expect:
7095     // Just replace __builtin_expect(exp, c) with EXP.
7096     setValue(&I, getValue(I.getArgOperand(0)));
7097     return;
7098 
7099   case Intrinsic::ubsantrap:
7100   case Intrinsic::debugtrap:
7101   case Intrinsic::trap: {
7102     StringRef TrapFuncName =
7103         I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
7104     if (TrapFuncName.empty()) {
7105       switch (Intrinsic) {
7106       case Intrinsic::trap:
7107         DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
7108         break;
7109       case Intrinsic::debugtrap:
7110         DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
7111         break;
7112       case Intrinsic::ubsantrap:
7113         DAG.setRoot(DAG.getNode(
7114             ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
7115             DAG.getTargetConstant(
7116                 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7117                 MVT::i32)));
7118         break;
7119       default: llvm_unreachable("unknown trap intrinsic");
7120       }
7121       return;
7122     }
7123     TargetLowering::ArgListTy Args;
7124     if (Intrinsic == Intrinsic::ubsantrap) {
7125       Args.push_back(TargetLoweringBase::ArgListEntry());
7126       Args[0].Val = I.getArgOperand(0);
7127       Args[0].Node = getValue(Args[0].Val);
7128       Args[0].Ty = Args[0].Val->getType();
7129     }
7130 
7131     TargetLowering::CallLoweringInfo CLI(DAG);
7132     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
7133         CallingConv::C, I.getType(),
7134         DAG.getExternalSymbol(TrapFuncName.data(),
7135                               TLI.getPointerTy(DAG.getDataLayout())),
7136         std::move(Args));
7137 
7138     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7139     DAG.setRoot(Result.second);
7140     return;
7141   }
7142 
7143   case Intrinsic::uadd_with_overflow:
7144   case Intrinsic::sadd_with_overflow:
7145   case Intrinsic::usub_with_overflow:
7146   case Intrinsic::ssub_with_overflow:
7147   case Intrinsic::umul_with_overflow:
7148   case Intrinsic::smul_with_overflow: {
7149     ISD::NodeType Op;
7150     switch (Intrinsic) {
7151     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7152     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
7153     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
7154     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
7155     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
7156     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
7157     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
7158     }
7159     SDValue Op1 = getValue(I.getArgOperand(0));
7160     SDValue Op2 = getValue(I.getArgOperand(1));
7161 
7162     EVT ResultVT = Op1.getValueType();
7163     EVT OverflowVT = MVT::i1;
7164     if (ResultVT.isVector())
7165       OverflowVT = EVT::getVectorVT(
7166           *Context, OverflowVT, ResultVT.getVectorElementCount());
7167 
7168     SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
7169     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
7170     return;
7171   }
7172   case Intrinsic::prefetch: {
7173     SDValue Ops[5];
7174     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7175     auto Flags = rw == 0 ? MachineMemOperand::MOLoad :MachineMemOperand::MOStore;
7176     Ops[0] = DAG.getRoot();
7177     Ops[1] = getValue(I.getArgOperand(0));
7178     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
7179                                    MVT::i32);
7180     Ops[3] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(2)), sdl,
7181                                    MVT::i32);
7182     Ops[4] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(3)), sdl,
7183                                    MVT::i32);
7184     SDValue Result = DAG.getMemIntrinsicNode(
7185         ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
7186         EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
7187         /* align */ std::nullopt, Flags);
7188 
7189     // Chain the prefetch in parallel with any pending loads, to stay out of
7190     // the way of later optimizations.
7191     PendingLoads.push_back(Result);
7192     Result = getRoot();
7193     DAG.setRoot(Result);
7194     return;
7195   }
7196   case Intrinsic::lifetime_start:
7197   case Intrinsic::lifetime_end: {
7198     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
7199     // Stack coloring is not enabled in O0, discard region information.
7200     if (TM.getOptLevel() == CodeGenOptLevel::None)
7201       return;
7202 
7203     const int64_t ObjectSize =
7204         cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
7205     Value *const ObjectPtr = I.getArgOperand(1);
7206     SmallVector<const Value *, 4> Allocas;
7207     getUnderlyingObjects(ObjectPtr, Allocas);
7208 
7209     for (const Value *Alloca : Allocas) {
7210       const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7211 
7212       // Could not find an Alloca.
7213       if (!LifetimeObject)
7214         continue;
7215 
7216       // First check that the Alloca is static, otherwise it won't have a
7217       // valid frame index.
7218       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
7219       if (SI == FuncInfo.StaticAllocaMap.end())
7220         return;
7221 
7222       const int FrameIndex = SI->second;
7223       int64_t Offset;
7224       if (GetPointerBaseWithConstantOffset(
7225               ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
7226         Offset = -1; // Cannot determine offset from alloca to lifetime object.
7227       Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
7228                                 Offset);
7229       DAG.setRoot(Res);
7230     }
7231     return;
7232   }
7233   case Intrinsic::pseudoprobe: {
7234     auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7235     auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7236     auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7237     Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
7238     DAG.setRoot(Res);
7239     return;
7240   }
7241   case Intrinsic::invariant_start:
7242     // Discard region information.
7243     setValue(&I,
7244              DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType())));
7245     return;
7246   case Intrinsic::invariant_end:
7247     // Discard region information.
7248     return;
7249   case Intrinsic::clear_cache:
7250     /// FunctionName may be null.
7251     if (const char *FunctionName = TLI.getClearCacheBuiltinName())
7252       lowerCallToExternalSymbol(I, FunctionName);
7253     return;
7254   case Intrinsic::donothing:
7255   case Intrinsic::seh_try_begin:
7256   case Intrinsic::seh_scope_begin:
7257   case Intrinsic::seh_try_end:
7258   case Intrinsic::seh_scope_end:
7259     // ignore
7260     return;
7261   case Intrinsic::experimental_stackmap:
7262     visitStackmap(I);
7263     return;
7264   case Intrinsic::experimental_patchpoint_void:
7265   case Intrinsic::experimental_patchpoint_i64:
7266     visitPatchpoint(I);
7267     return;
7268   case Intrinsic::experimental_gc_statepoint:
7269     LowerStatepoint(cast<GCStatepointInst>(I));
7270     return;
7271   case Intrinsic::experimental_gc_result:
7272     visitGCResult(cast<GCResultInst>(I));
7273     return;
7274   case Intrinsic::experimental_gc_relocate:
7275     visitGCRelocate(cast<GCRelocateInst>(I));
7276     return;
7277   case Intrinsic::instrprof_cover:
7278     llvm_unreachable("instrprof failed to lower a cover");
7279   case Intrinsic::instrprof_increment:
7280     llvm_unreachable("instrprof failed to lower an increment");
7281   case Intrinsic::instrprof_timestamp:
7282     llvm_unreachable("instrprof failed to lower a timestamp");
7283   case Intrinsic::instrprof_value_profile:
7284     llvm_unreachable("instrprof failed to lower a value profiling call");
7285   case Intrinsic::instrprof_mcdc_parameters:
7286     llvm_unreachable("instrprof failed to lower mcdc parameters");
7287   case Intrinsic::instrprof_mcdc_tvbitmap_update:
7288     llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update");
7289   case Intrinsic::instrprof_mcdc_condbitmap_update:
7290     llvm_unreachable("instrprof failed to lower an mcdc condbitmap update");
7291   case Intrinsic::localescape: {
7292     MachineFunction &MF = DAG.getMachineFunction();
7293     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
7294 
7295     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
7296     // is the same on all targets.
7297     for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) {
7298       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7299       if (isa<ConstantPointerNull>(Arg))
7300         continue; // Skip null pointers. They represent a hole in index space.
7301       AllocaInst *Slot = cast<AllocaInst>(Arg);
7302       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
7303              "can only escape static allocas");
7304       int FI = FuncInfo.StaticAllocaMap[Slot];
7305       MCSymbol *FrameAllocSym =
7306           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7307               GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
7308       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
7309               TII->get(TargetOpcode::LOCAL_ESCAPE))
7310           .addSym(FrameAllocSym)
7311           .addFrameIndex(FI);
7312     }
7313 
7314     return;
7315   }
7316 
7317   case Intrinsic::localrecover: {
7318     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
7319     MachineFunction &MF = DAG.getMachineFunction();
7320 
7321     // Get the symbol that defines the frame offset.
7322     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7323     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
7324     unsigned IdxVal =
7325         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7326     MCSymbol *FrameAllocSym =
7327         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7328             GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
7329 
7330     Value *FP = I.getArgOperand(1);
7331     SDValue FPVal = getValue(FP);
7332     EVT PtrVT = FPVal.getValueType();
7333 
7334     // Create a MCSymbol for the label to avoid any target lowering
7335     // that would make this PC relative.
7336     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
7337     SDValue OffsetVal =
7338         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
7339 
7340     // Add the offset to the FP.
7341     SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7342     setValue(&I, Add);
7343 
7344     return;
7345   }
7346 
7347   case Intrinsic::eh_exceptionpointer:
7348   case Intrinsic::eh_exceptioncode: {
7349     // Get the exception pointer vreg, copy from it, and resize it to fit.
7350     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
7351     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
7352     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
7353     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
7354     SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT);
7355     if (Intrinsic == Intrinsic::eh_exceptioncode)
7356       N = DAG.getZExtOrTrunc(N, sdl, MVT::i32);
7357     setValue(&I, N);
7358     return;
7359   }
7360   case Intrinsic::xray_customevent: {
7361     // Here we want to make sure that the intrinsic behaves as if it has a
7362     // specific calling convention.
7363     const auto &Triple = DAG.getTarget().getTargetTriple();
7364     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7365       return;
7366 
7367     SmallVector<SDValue, 8> Ops;
7368 
7369     // We want to say that we always want the arguments in registers.
7370     SDValue LogEntryVal = getValue(I.getArgOperand(0));
7371     SDValue StrSizeVal = getValue(I.getArgOperand(1));
7372     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7373     SDValue Chain = getRoot();
7374     Ops.push_back(LogEntryVal);
7375     Ops.push_back(StrSizeVal);
7376     Ops.push_back(Chain);
7377 
7378     // We need to enforce the calling convention for the callsite, so that
7379     // argument ordering is enforced correctly, and that register allocation can
7380     // see that some registers may be assumed clobbered and have to preserve
7381     // them across calls to the intrinsic.
7382     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7383                                            sdl, NodeTys, Ops);
7384     SDValue patchableNode = SDValue(MN, 0);
7385     DAG.setRoot(patchableNode);
7386     setValue(&I, patchableNode);
7387     return;
7388   }
7389   case Intrinsic::xray_typedevent: {
7390     // Here we want to make sure that the intrinsic behaves as if it has a
7391     // specific calling convention.
7392     const auto &Triple = DAG.getTarget().getTargetTriple();
7393     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7394       return;
7395 
7396     SmallVector<SDValue, 8> Ops;
7397 
7398     // We want to say that we always want the arguments in registers.
7399     // It's unclear to me how manipulating the selection DAG here forces callers
7400     // to provide arguments in registers instead of on the stack.
7401     SDValue LogTypeId = getValue(I.getArgOperand(0));
7402     SDValue LogEntryVal = getValue(I.getArgOperand(1));
7403     SDValue StrSizeVal = getValue(I.getArgOperand(2));
7404     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7405     SDValue Chain = getRoot();
7406     Ops.push_back(LogTypeId);
7407     Ops.push_back(LogEntryVal);
7408     Ops.push_back(StrSizeVal);
7409     Ops.push_back(Chain);
7410 
7411     // We need to enforce the calling convention for the callsite, so that
7412     // argument ordering is enforced correctly, and that register allocation can
7413     // see that some registers may be assumed clobbered and have to preserve
7414     // them across calls to the intrinsic.
7415     MachineSDNode *MN = DAG.getMachineNode(
7416         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7417     SDValue patchableNode = SDValue(MN, 0);
7418     DAG.setRoot(patchableNode);
7419     setValue(&I, patchableNode);
7420     return;
7421   }
7422   case Intrinsic::experimental_deoptimize:
7423     LowerDeoptimizeCall(&I);
7424     return;
7425   case Intrinsic::experimental_stepvector:
7426     visitStepVector(I);
7427     return;
7428   case Intrinsic::vector_reduce_fadd:
7429   case Intrinsic::vector_reduce_fmul:
7430   case Intrinsic::vector_reduce_add:
7431   case Intrinsic::vector_reduce_mul:
7432   case Intrinsic::vector_reduce_and:
7433   case Intrinsic::vector_reduce_or:
7434   case Intrinsic::vector_reduce_xor:
7435   case Intrinsic::vector_reduce_smax:
7436   case Intrinsic::vector_reduce_smin:
7437   case Intrinsic::vector_reduce_umax:
7438   case Intrinsic::vector_reduce_umin:
7439   case Intrinsic::vector_reduce_fmax:
7440   case Intrinsic::vector_reduce_fmin:
7441   case Intrinsic::vector_reduce_fmaximum:
7442   case Intrinsic::vector_reduce_fminimum:
7443     visitVectorReduce(I, Intrinsic);
7444     return;
7445 
7446   case Intrinsic::icall_branch_funnel: {
7447     SmallVector<SDValue, 16> Ops;
7448     Ops.push_back(getValue(I.getArgOperand(0)));
7449 
7450     int64_t Offset;
7451     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7452         I.getArgOperand(1), Offset, DAG.getDataLayout()));
7453     if (!Base)
7454       report_fatal_error(
7455           "llvm.icall.branch.funnel operand must be a GlobalValue");
7456     Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0));
7457 
7458     struct BranchFunnelTarget {
7459       int64_t Offset;
7460       SDValue Target;
7461     };
7462     SmallVector<BranchFunnelTarget, 8> Targets;
7463 
7464     for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) {
7465       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7466           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
7467       if (ElemBase != Base)
7468         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
7469                            "to the same GlobalValue");
7470 
7471       SDValue Val = getValue(I.getArgOperand(Op + 1));
7472       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7473       if (!GA)
7474         report_fatal_error(
7475             "llvm.icall.branch.funnel operand must be a GlobalValue");
7476       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
7477                                      GA->getGlobal(), sdl, Val.getValueType(),
7478                                      GA->getOffset())});
7479     }
7480     llvm::sort(Targets,
7481                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
7482                  return T1.Offset < T2.Offset;
7483                });
7484 
7485     for (auto &T : Targets) {
7486       Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32));
7487       Ops.push_back(T.Target);
7488     }
7489 
7490     Ops.push_back(DAG.getRoot()); // Chain
7491     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7492                                  MVT::Other, Ops),
7493               0);
7494     DAG.setRoot(N);
7495     setValue(&I, N);
7496     HasTailCall = true;
7497     return;
7498   }
7499 
7500   case Intrinsic::wasm_landingpad_index:
7501     // Information this intrinsic contained has been transferred to
7502     // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
7503     // delete it now.
7504     return;
7505 
7506   case Intrinsic::aarch64_settag:
7507   case Intrinsic::aarch64_settag_zero: {
7508     const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7509     bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
7510     SDValue Val = TSI.EmitTargetCodeForSetTag(
7511         DAG, sdl, getRoot(), getValue(I.getArgOperand(0)),
7512         getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
7513         ZeroMemory);
7514     DAG.setRoot(Val);
7515     setValue(&I, Val);
7516     return;
7517   }
7518   case Intrinsic::amdgcn_cs_chain: {
7519     assert(I.arg_size() == 5 && "Additional args not supported yet");
7520     assert(cast<ConstantInt>(I.getOperand(4))->isZero() &&
7521            "Non-zero flags not supported yet");
7522 
7523     // At this point we don't care if it's amdgpu_cs_chain or
7524     // amdgpu_cs_chain_preserve.
7525     CallingConv::ID CC = CallingConv::AMDGPU_CS_Chain;
7526 
7527     Type *RetTy = I.getType();
7528     assert(RetTy->isVoidTy() && "Should not return");
7529 
7530     SDValue Callee = getValue(I.getOperand(0));
7531 
7532     // We only have 2 actual args: one for the SGPRs and one for the VGPRs.
7533     // We'll also tack the value of the EXEC mask at the end.
7534     TargetLowering::ArgListTy Args;
7535     Args.reserve(3);
7536 
7537     for (unsigned Idx : {2, 3, 1}) {
7538       TargetLowering::ArgListEntry Arg;
7539       Arg.Node = getValue(I.getOperand(Idx));
7540       Arg.Ty = I.getOperand(Idx)->getType();
7541       Arg.setAttributes(&I, Idx);
7542       Args.push_back(Arg);
7543     }
7544 
7545     assert(Args[0].IsInReg && "SGPR args should be marked inreg");
7546     assert(!Args[1].IsInReg && "VGPR args should not be marked inreg");
7547     Args[2].IsInReg = true; // EXEC should be inreg
7548 
7549     TargetLowering::CallLoweringInfo CLI(DAG);
7550     CLI.setDebugLoc(getCurSDLoc())
7551         .setChain(getRoot())
7552         .setCallee(CC, RetTy, Callee, std::move(Args))
7553         .setNoReturn(true)
7554         .setTailCall(true)
7555         .setConvergent(I.isConvergent());
7556     CLI.CB = &I;
7557     std::pair<SDValue, SDValue> Result =
7558         lowerInvokable(CLI, /*EHPadBB*/ nullptr);
7559     (void)Result;
7560     assert(!Result.first.getNode() && !Result.second.getNode() &&
7561            "Should've lowered as tail call");
7562 
7563     HasTailCall = true;
7564     return;
7565   }
7566   case Intrinsic::ptrmask: {
7567     SDValue Ptr = getValue(I.getOperand(0));
7568     SDValue Mask = getValue(I.getOperand(1));
7569 
7570     EVT PtrVT = Ptr.getValueType();
7571     assert(PtrVT == Mask.getValueType() &&
7572            "Pointers with different index type are not supported by SDAG");
7573     setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr, Mask));
7574     return;
7575   }
7576   case Intrinsic::threadlocal_address: {
7577     setValue(&I, getValue(I.getOperand(0)));
7578     return;
7579   }
7580   case Intrinsic::get_active_lane_mask: {
7581     EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7582     SDValue Index = getValue(I.getOperand(0));
7583     EVT ElementVT = Index.getValueType();
7584 
7585     if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) {
7586       visitTargetIntrinsic(I, Intrinsic);
7587       return;
7588     }
7589 
7590     SDValue TripCount = getValue(I.getOperand(1));
7591     EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT,
7592                                  CCVT.getVectorElementCount());
7593 
7594     SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index);
7595     SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount);
7596     SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
7597     SDValue VectorInduction = DAG.getNode(
7598         ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep);
7599     SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction,
7600                                  VectorTripCount, ISD::CondCode::SETULT);
7601     setValue(&I, SetCC);
7602     return;
7603   }
7604   case Intrinsic::experimental_get_vector_length: {
7605     assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
7606            "Expected positive VF");
7607     unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
7608     bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
7609 
7610     SDValue Count = getValue(I.getOperand(0));
7611     EVT CountVT = Count.getValueType();
7612 
7613     if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) {
7614       visitTargetIntrinsic(I, Intrinsic);
7615       return;
7616     }
7617 
7618     // Expand to a umin between the trip count and the maximum elements the type
7619     // can hold.
7620     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7621 
7622     // Extend the trip count to at least the result VT.
7623     if (CountVT.bitsLT(VT)) {
7624       Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count);
7625       CountVT = VT;
7626     }
7627 
7628     SDValue MaxEVL = DAG.getElementCount(sdl, CountVT,
7629                                          ElementCount::get(VF, IsScalable));
7630 
7631     SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL);
7632     // Clip to the result type if needed.
7633     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin);
7634 
7635     setValue(&I, Trunc);
7636     return;
7637   }
7638   case Intrinsic::experimental_cttz_elts: {
7639     auto DL = getCurSDLoc();
7640     SDValue Op = getValue(I.getOperand(0));
7641     EVT OpVT = Op.getValueType();
7642 
7643     if (!TLI.shouldExpandCttzElements(OpVT)) {
7644       visitTargetIntrinsic(I, Intrinsic);
7645       return;
7646     }
7647 
7648     if (OpVT.getScalarType() != MVT::i1) {
7649       // Compare the input vector elements to zero & use to count trailing zeros
7650       SDValue AllZero = DAG.getConstant(0, DL, OpVT);
7651       OpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
7652                               OpVT.getVectorElementCount());
7653       Op = DAG.getSetCC(DL, OpVT, Op, AllZero, ISD::SETNE);
7654     }
7655 
7656     // Find the smallest "sensible" element type to use for the expansion.
7657     ConstantRange CR(
7658         APInt(64, OpVT.getVectorElementCount().getKnownMinValue()));
7659     if (OpVT.isScalableVT())
7660       CR = CR.umul_sat(getVScaleRange(I.getCaller(), 64));
7661 
7662     // If the zero-is-poison flag is set, we can assume the upper limit
7663     // of the result is VF-1.
7664     if (!cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero())
7665       CR = CR.subtract(APInt(64, 1));
7666 
7667     unsigned EltWidth = I.getType()->getScalarSizeInBits();
7668     EltWidth = std::min(EltWidth, (unsigned)CR.getActiveBits());
7669     EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);
7670 
7671     MVT NewEltTy = MVT::getIntegerVT(EltWidth);
7672 
7673     // Create the new vector type & get the vector length
7674     EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltTy,
7675                                  OpVT.getVectorElementCount());
7676 
7677     SDValue VL =
7678         DAG.getElementCount(DL, NewEltTy, OpVT.getVectorElementCount());
7679 
7680     SDValue StepVec = DAG.getStepVector(DL, NewVT);
7681     SDValue SplatVL = DAG.getSplat(NewVT, DL, VL);
7682     SDValue StepVL = DAG.getNode(ISD::SUB, DL, NewVT, SplatVL, StepVec);
7683     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, Op);
7684     SDValue And = DAG.getNode(ISD::AND, DL, NewVT, StepVL, Ext);
7685     SDValue Max = DAG.getNode(ISD::VECREDUCE_UMAX, DL, NewEltTy, And);
7686     SDValue Sub = DAG.getNode(ISD::SUB, DL, NewEltTy, VL, Max);
7687 
7688     EVT RetTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7689     SDValue Ret = DAG.getZExtOrTrunc(Sub, DL, RetTy);
7690 
7691     setValue(&I, Ret);
7692     return;
7693   }
7694   case Intrinsic::vector_insert: {
7695     SDValue Vec = getValue(I.getOperand(0));
7696     SDValue SubVec = getValue(I.getOperand(1));
7697     SDValue Index = getValue(I.getOperand(2));
7698 
7699     // The intrinsic's index type is i64, but the SDNode requires an index type
7700     // suitable for the target. Convert the index as required.
7701     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7702     if (Index.getValueType() != VectorIdxTy)
7703       Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
7704 
7705     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7706     setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
7707                              Index));
7708     return;
7709   }
7710   case Intrinsic::vector_extract: {
7711     SDValue Vec = getValue(I.getOperand(0));
7712     SDValue Index = getValue(I.getOperand(1));
7713     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7714 
7715     // The intrinsic's index type is i64, but the SDNode requires an index type
7716     // suitable for the target. Convert the index as required.
7717     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7718     if (Index.getValueType() != VectorIdxTy)
7719       Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
7720 
7721     setValue(&I,
7722              DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
7723     return;
7724   }
7725   case Intrinsic::experimental_vector_reverse:
7726     visitVectorReverse(I);
7727     return;
7728   case Intrinsic::experimental_vector_splice:
7729     visitVectorSplice(I);
7730     return;
7731   case Intrinsic::callbr_landingpad:
7732     visitCallBrLandingPad(I);
7733     return;
7734   case Intrinsic::experimental_vector_interleave2:
7735     visitVectorInterleave(I);
7736     return;
7737   case Intrinsic::experimental_vector_deinterleave2:
7738     visitVectorDeinterleave(I);
7739     return;
7740   }
7741 }
7742 
7743 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
7744     const ConstrainedFPIntrinsic &FPI) {
7745   SDLoc sdl = getCurSDLoc();
7746 
7747   // We do not need to serialize constrained FP intrinsics against
7748   // each other or against (nonvolatile) loads, so they can be
7749   // chained like loads.
7750   SDValue Chain = DAG.getRoot();
7751   SmallVector<SDValue, 4> Opers;
7752   Opers.push_back(Chain);
7753   if (FPI.isUnaryOp()) {
7754     Opers.push_back(getValue(FPI.getArgOperand(0)));
7755   } else if (FPI.isTernaryOp()) {
7756     Opers.push_back(getValue(FPI.getArgOperand(0)));
7757     Opers.push_back(getValue(FPI.getArgOperand(1)));
7758     Opers.push_back(getValue(FPI.getArgOperand(2)));
7759   } else {
7760     Opers.push_back(getValue(FPI.getArgOperand(0)));
7761     Opers.push_back(getValue(FPI.getArgOperand(1)));
7762   }
7763 
7764   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
7765     assert(Result.getNode()->getNumValues() == 2);
7766 
7767     // Push node to the appropriate list so that future instructions can be
7768     // chained up correctly.
7769     SDValue OutChain = Result.getValue(1);
7770     switch (EB) {
7771     case fp::ExceptionBehavior::ebIgnore:
7772       // The only reason why ebIgnore nodes still need to be chained is that
7773       // they might depend on the current rounding mode, and therefore must
7774       // not be moved across instruction that may change that mode.
7775       [[fallthrough]];
7776     case fp::ExceptionBehavior::ebMayTrap:
7777       // These must not be moved across calls or instructions that may change
7778       // floating-point exception masks.
7779       PendingConstrainedFP.push_back(OutChain);
7780       break;
7781     case fp::ExceptionBehavior::ebStrict:
7782       // These must not be moved across calls or instructions that may change
7783       // floating-point exception masks or read floating-point exception flags.
7784       // In addition, they cannot be optimized out even if unused.
7785       PendingConstrainedFPStrict.push_back(OutChain);
7786       break;
7787     }
7788   };
7789 
7790   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7791   EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
7792   SDVTList VTs = DAG.getVTList(VT, MVT::Other);
7793   fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
7794 
7795   SDNodeFlags Flags;
7796   if (EB == fp::ExceptionBehavior::ebIgnore)
7797     Flags.setNoFPExcept(true);
7798 
7799   if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
7800     Flags.copyFMF(*FPOp);
7801 
7802   unsigned Opcode;
7803   switch (FPI.getIntrinsicID()) {
7804   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7805 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
7806   case Intrinsic::INTRINSIC:                                                   \
7807     Opcode = ISD::STRICT_##DAGN;                                               \
7808     break;
7809 #include "llvm/IR/ConstrainedOps.def"
7810   case Intrinsic::experimental_constrained_fmuladd: {
7811     Opcode = ISD::STRICT_FMA;
7812     // Break fmuladd into fmul and fadd.
7813     if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
7814         !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
7815       Opers.pop_back();
7816       SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
7817       pushOutChain(Mul, EB);
7818       Opcode = ISD::STRICT_FADD;
7819       Opers.clear();
7820       Opers.push_back(Mul.getValue(1));
7821       Opers.push_back(Mul.getValue(0));
7822       Opers.push_back(getValue(FPI.getArgOperand(2)));
7823     }
7824     break;
7825   }
7826   }
7827 
7828   // A few strict DAG nodes carry additional operands that are not
7829   // set up by the default code above.
7830   switch (Opcode) {
7831   default: break;
7832   case ISD::STRICT_FP_ROUND:
7833     Opers.push_back(
7834         DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
7835     break;
7836   case ISD::STRICT_FSETCC:
7837   case ISD::STRICT_FSETCCS: {
7838     auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
7839     ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
7840     if (TM.Options.NoNaNsFPMath)
7841       Condition = getFCmpCodeWithoutNaN(Condition);
7842     Opers.push_back(DAG.getCondCode(Condition));
7843     break;
7844   }
7845   }
7846 
7847   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
7848   pushOutChain(Result, EB);
7849 
7850   SDValue FPResult = Result.getValue(0);
7851   setValue(&FPI, FPResult);
7852 }
7853 
7854 static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
7855   std::optional<unsigned> ResOPC;
7856   switch (VPIntrin.getIntrinsicID()) {
7857   case Intrinsic::vp_ctlz: {
7858     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7859     ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
7860     break;
7861   }
7862   case Intrinsic::vp_cttz: {
7863     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7864     ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
7865     break;
7866   }
7867 #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)                                    \
7868   case Intrinsic::VPID:                                                        \
7869     ResOPC = ISD::VPSD;                                                        \
7870     break;
7871 #include "llvm/IR/VPIntrinsics.def"
7872   }
7873 
7874   if (!ResOPC)
7875     llvm_unreachable(
7876         "Inconsistency: no SDNode available for this VPIntrinsic!");
7877 
7878   if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
7879       *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
7880     if (VPIntrin.getFastMathFlags().allowReassoc())
7881       return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
7882                                                 : ISD::VP_REDUCE_FMUL;
7883   }
7884 
7885   return *ResOPC;
7886 }
7887 
7888 void SelectionDAGBuilder::visitVPLoad(
7889     const VPIntrinsic &VPIntrin, EVT VT,
7890     const SmallVectorImpl<SDValue> &OpValues) {
7891   SDLoc DL = getCurSDLoc();
7892   Value *PtrOperand = VPIntrin.getArgOperand(0);
7893   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7894   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7895   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7896   SDValue LD;
7897   // Do not serialize variable-length loads of constant memory with
7898   // anything.
7899   if (!Alignment)
7900     Alignment = DAG.getEVTAlign(VT);
7901   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
7902   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
7903   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
7904   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7905       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
7906       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7907   LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
7908                      MMO, false /*IsExpanding */);
7909   if (AddToChain)
7910     PendingLoads.push_back(LD.getValue(1));
7911   setValue(&VPIntrin, LD);
7912 }
7913 
7914 void SelectionDAGBuilder::visitVPGather(
7915     const VPIntrinsic &VPIntrin, EVT VT,
7916     const SmallVectorImpl<SDValue> &OpValues) {
7917   SDLoc DL = getCurSDLoc();
7918   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7919   Value *PtrOperand = VPIntrin.getArgOperand(0);
7920   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7921   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7922   const MDNode *Ranges = getRangeMetadata(VPIntrin);
7923   SDValue LD;
7924   if (!Alignment)
7925     Alignment = DAG.getEVTAlign(VT.getScalarType());
7926   unsigned AS =
7927     PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7928   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7929      MachinePointerInfo(AS), MachineMemOperand::MOLoad,
7930      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7931   SDValue Base, Index, Scale;
7932   ISD::MemIndexType IndexType;
7933   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7934                                     this, VPIntrin.getParent(),
7935                                     VT.getScalarStoreSize());
7936   if (!UniformBase) {
7937     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7938     Index = getValue(PtrOperand);
7939     IndexType = ISD::SIGNED_SCALED;
7940     Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7941   }
7942   EVT IdxVT = Index.getValueType();
7943   EVT EltTy = IdxVT.getVectorElementType();
7944   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7945     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7946     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7947   }
7948   LD = DAG.getGatherVP(
7949       DAG.getVTList(VT, MVT::Other), VT, DL,
7950       {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
7951       IndexType);
7952   PendingLoads.push_back(LD.getValue(1));
7953   setValue(&VPIntrin, LD);
7954 }
7955 
7956 void SelectionDAGBuilder::visitVPStore(
7957     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7958   SDLoc DL = getCurSDLoc();
7959   Value *PtrOperand = VPIntrin.getArgOperand(1);
7960   EVT VT = OpValues[0].getValueType();
7961   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7962   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7963   SDValue ST;
7964   if (!Alignment)
7965     Alignment = DAG.getEVTAlign(VT);
7966   SDValue Ptr = OpValues[1];
7967   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
7968   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7969       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
7970       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7971   ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
7972                       OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
7973                       /* IsTruncating */ false, /*IsCompressing*/ false);
7974   DAG.setRoot(ST);
7975   setValue(&VPIntrin, ST);
7976 }
7977 
7978 void SelectionDAGBuilder::visitVPScatter(
7979     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7980   SDLoc DL = getCurSDLoc();
7981   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7982   Value *PtrOperand = VPIntrin.getArgOperand(1);
7983   EVT VT = OpValues[0].getValueType();
7984   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7985   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7986   SDValue ST;
7987   if (!Alignment)
7988     Alignment = DAG.getEVTAlign(VT.getScalarType());
7989   unsigned AS =
7990       PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7991   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7992       MachinePointerInfo(AS), MachineMemOperand::MOStore,
7993       MemoryLocation::UnknownSize, *Alignment, AAInfo);
7994   SDValue Base, Index, Scale;
7995   ISD::MemIndexType IndexType;
7996   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7997                                     this, VPIntrin.getParent(),
7998                                     VT.getScalarStoreSize());
7999   if (!UniformBase) {
8000     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
8001     Index = getValue(PtrOperand);
8002     IndexType = ISD::SIGNED_SCALED;
8003     Scale =
8004       DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
8005   }
8006   EVT IdxVT = Index.getValueType();
8007   EVT EltTy = IdxVT.getVectorElementType();
8008   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
8009     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
8010     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
8011   }
8012   ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
8013                         {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8014                          OpValues[2], OpValues[3]},
8015                         MMO, IndexType);
8016   DAG.setRoot(ST);
8017   setValue(&VPIntrin, ST);
8018 }
8019 
8020 void SelectionDAGBuilder::visitVPStridedLoad(
8021     const VPIntrinsic &VPIntrin, EVT VT,
8022     const SmallVectorImpl<SDValue> &OpValues) {
8023   SDLoc DL = getCurSDLoc();
8024   Value *PtrOperand = VPIntrin.getArgOperand(0);
8025   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8026   if (!Alignment)
8027     Alignment = DAG.getEVTAlign(VT.getScalarType());
8028   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8029   const MDNode *Ranges = getRangeMetadata(VPIntrin);
8030   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8031   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
8032   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8033   unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8034   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8035       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
8036       MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
8037 
8038   SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
8039                                     OpValues[2], OpValues[3], MMO,
8040                                     false /*IsExpanding*/);
8041 
8042   if (AddToChain)
8043     PendingLoads.push_back(LD.getValue(1));
8044   setValue(&VPIntrin, LD);
8045 }
8046 
8047 void SelectionDAGBuilder::visitVPStridedStore(
8048     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8049   SDLoc DL = getCurSDLoc();
8050   Value *PtrOperand = VPIntrin.getArgOperand(1);
8051   EVT VT = OpValues[0].getValueType();
8052   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8053   if (!Alignment)
8054     Alignment = DAG.getEVTAlign(VT.getScalarType());
8055   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8056   unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8057   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8058       MachinePointerInfo(AS), MachineMemOperand::MOStore,
8059       MemoryLocation::UnknownSize, *Alignment, AAInfo);
8060 
8061   SDValue ST = DAG.getStridedStoreVP(
8062       getMemoryRoot(), DL, OpValues[0], OpValues[1],
8063       DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8064       OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false,
8065       /*IsCompressing*/ false);
8066 
8067   DAG.setRoot(ST);
8068   setValue(&VPIntrin, ST);
8069 }
8070 
8071 void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
8072   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8073   SDLoc DL = getCurSDLoc();
8074 
8075   ISD::CondCode Condition;
8076   CmpInst::Predicate CondCode = VPIntrin.getPredicate();
8077   bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
8078   if (IsFP) {
8079     // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan)
8080     // flags, but calls that don't return floating-point types can't be
8081     // FPMathOperators, like vp.fcmp. This affects constrained fcmp too.
8082     Condition = getFCmpCondCode(CondCode);
8083     if (TM.Options.NoNaNsFPMath)
8084       Condition = getFCmpCodeWithoutNaN(Condition);
8085   } else {
8086     Condition = getICmpCondCode(CondCode);
8087   }
8088 
8089   SDValue Op1 = getValue(VPIntrin.getOperand(0));
8090   SDValue Op2 = getValue(VPIntrin.getOperand(1));
8091   // #2 is the condition code
8092   SDValue MaskOp = getValue(VPIntrin.getOperand(3));
8093   SDValue EVL = getValue(VPIntrin.getOperand(4));
8094   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8095   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8096          "Unexpected target EVL type");
8097   EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
8098 
8099   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8100                                                         VPIntrin.getType());
8101   setValue(&VPIntrin,
8102            DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8103 }
8104 
8105 void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8106     const VPIntrinsic &VPIntrin) {
8107   SDLoc DL = getCurSDLoc();
8108   unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
8109 
8110   auto IID = VPIntrin.getIntrinsicID();
8111 
8112   if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8113     return visitVPCmp(*CmpI);
8114 
8115   SmallVector<EVT, 4> ValueVTs;
8116   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8117   ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
8118   SDVTList VTs = DAG.getVTList(ValueVTs);
8119 
8120   auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);
8121 
8122   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8123   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8124          "Unexpected target EVL type");
8125 
8126   // Request operands.
8127   SmallVector<SDValue, 7> OpValues;
8128   for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) {
8129     auto Op = getValue(VPIntrin.getArgOperand(I));
8130     if (I == EVLParamPos)
8131       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
8132     OpValues.push_back(Op);
8133   }
8134 
8135   switch (Opcode) {
8136   default: {
8137     SDNodeFlags SDFlags;
8138     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8139       SDFlags.copyFMF(*FPMO);
8140     SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags);
8141     setValue(&VPIntrin, Result);
8142     break;
8143   }
8144   case ISD::VP_LOAD:
8145     visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8146     break;
8147   case ISD::VP_GATHER:
8148     visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8149     break;
8150   case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8151     visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8152     break;
8153   case ISD::VP_STORE:
8154     visitVPStore(VPIntrin, OpValues);
8155     break;
8156   case ISD::VP_SCATTER:
8157     visitVPScatter(VPIntrin, OpValues);
8158     break;
8159   case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8160     visitVPStridedStore(VPIntrin, OpValues);
8161     break;
8162   case ISD::VP_FMULADD: {
8163     assert(OpValues.size() == 5 && "Unexpected number of operands");
8164     SDNodeFlags SDFlags;
8165     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8166       SDFlags.copyFMF(*FPMO);
8167     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
8168         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) {
8169       setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags));
8170     } else {
8171       SDValue Mul = DAG.getNode(
8172           ISD::VP_FMUL, DL, VTs,
8173           {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8174       SDValue Add =
8175           DAG.getNode(ISD::VP_FADD, DL, VTs,
8176                       {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8177       setValue(&VPIntrin, Add);
8178     }
8179     break;
8180   }
8181   case ISD::VP_IS_FPCLASS: {
8182     const DataLayout DLayout = DAG.getDataLayout();
8183     EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
8184     auto Constant = OpValues[1]->getAsZExtVal();
8185     SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
8186     SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
8187                             {OpValues[0], Check, OpValues[2], OpValues[3]});
8188     setValue(&VPIntrin, V);
8189     return;
8190   }
8191   case ISD::VP_INTTOPTR: {
8192     SDValue N = OpValues[0];
8193     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
8194     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
8195     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8196                                OpValues[2]);
8197     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8198                              OpValues[2]);
8199     setValue(&VPIntrin, N);
8200     break;
8201   }
8202   case ISD::VP_PTRTOINT: {
8203     SDValue N = OpValues[0];
8204     EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8205                                                           VPIntrin.getType());
8206     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
8207                                        VPIntrin.getOperand(0)->getType());
8208     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8209                                OpValues[2]);
8210     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8211                              OpValues[2]);
8212     setValue(&VPIntrin, N);
8213     break;
8214   }
8215   case ISD::VP_ABS:
8216   case ISD::VP_CTLZ:
8217   case ISD::VP_CTLZ_ZERO_UNDEF:
8218   case ISD::VP_CTTZ:
8219   case ISD::VP_CTTZ_ZERO_UNDEF: {
8220     SDValue Result =
8221         DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8222     setValue(&VPIntrin, Result);
8223     break;
8224   }
8225   }
8226 }
8227 
8228 SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
8229                                           const BasicBlock *EHPadBB,
8230                                           MCSymbol *&BeginLabel) {
8231   MachineFunction &MF = DAG.getMachineFunction();
8232   MachineModuleInfo &MMI = MF.getMMI();
8233 
8234   // Insert a label before the invoke call to mark the try range.  This can be
8235   // used to detect deletion of the invoke via the MachineModuleInfo.
8236   BeginLabel = MMI.getContext().createTempSymbol();
8237 
8238   // For SjLj, keep track of which landing pads go with which invokes
8239   // so as to maintain the ordering of pads in the LSDA.
8240   unsigned CallSiteIndex = MMI.getCurrentCallSite();
8241   if (CallSiteIndex) {
8242     MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
8243     LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
8244 
8245     // Now that the call site is handled, stop tracking it.
8246     MMI.setCurrentCallSite(0);
8247   }
8248 
8249   return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
8250 }
8251 
8252 SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
8253                                         const BasicBlock *EHPadBB,
8254                                         MCSymbol *BeginLabel) {
8255   assert(BeginLabel && "BeginLabel should've been set");
8256 
8257   MachineFunction &MF = DAG.getMachineFunction();
8258   MachineModuleInfo &MMI = MF.getMMI();
8259 
8260   // Insert a label at the end of the invoke call to mark the try range.  This
8261   // can be used to detect deletion of the invoke via the MachineModuleInfo.
8262   MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
8263   Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
8264 
8265   // Inform MachineModuleInfo of range.
8266   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
8267   // There is a platform (e.g. wasm) that uses funclet style IR but does not
8268   // actually use outlined funclets and their LSDA info style.
8269   if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
8270     assert(II && "II should've been set");
8271     WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
8272     EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
8273   } else if (!isScopedEHPersonality(Pers)) {
8274     assert(EHPadBB);
8275     MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
8276   }
8277 
8278   return Chain;
8279 }
8280 
8281 std::pair<SDValue, SDValue>
8282 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
8283                                     const BasicBlock *EHPadBB) {
8284   MCSymbol *BeginLabel = nullptr;
8285 
8286   if (EHPadBB) {
8287     // Both PendingLoads and PendingExports must be flushed here;
8288     // this call might not return.
8289     (void)getRoot();
8290     DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
8291     CLI.setChain(getRoot());
8292   }
8293 
8294   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8295   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
8296 
8297   assert((CLI.IsTailCall || Result.second.getNode()) &&
8298          "Non-null chain expected with non-tail call!");
8299   assert((Result.second.getNode() || !Result.first.getNode()) &&
8300          "Null value expected with tail call!");
8301 
8302   if (!Result.second.getNode()) {
8303     // As a special case, a null chain means that a tail call has been emitted
8304     // and the DAG root is already updated.
8305     HasTailCall = true;
8306 
8307     // Since there's no actual continuation from this block, nothing can be
8308     // relying on us setting vregs for them.
8309     PendingExports.clear();
8310   } else {
8311     DAG.setRoot(Result.second);
8312   }
8313 
8314   if (EHPadBB) {
8315     DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
8316                            BeginLabel));
8317   }
8318 
8319   return Result;
8320 }
8321 
8322 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
8323                                       bool isTailCall,
8324                                       bool isMustTailCall,
8325                                       const BasicBlock *EHPadBB) {
8326   auto &DL = DAG.getDataLayout();
8327   FunctionType *FTy = CB.getFunctionType();
8328   Type *RetTy = CB.getType();
8329 
8330   TargetLowering::ArgListTy Args;
8331   Args.reserve(CB.arg_size());
8332 
8333   const Value *SwiftErrorVal = nullptr;
8334   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8335 
8336   if (isTailCall) {
8337     // Avoid emitting tail calls in functions with the disable-tail-calls
8338     // attribute.
8339     auto *Caller = CB.getParent()->getParent();
8340     if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
8341         "true" && !isMustTailCall)
8342       isTailCall = false;
8343 
8344     // We can't tail call inside a function with a swifterror argument. Lowering
8345     // does not support this yet. It would have to move into the swifterror
8346     // register before the call.
8347     if (TLI.supportSwiftError() &&
8348         Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8349       isTailCall = false;
8350   }
8351 
8352   for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
8353     TargetLowering::ArgListEntry Entry;
8354     const Value *V = *I;
8355 
8356     // Skip empty types
8357     if (V->getType()->isEmptyTy())
8358       continue;
8359 
8360     SDValue ArgNode = getValue(V);
8361     Entry.Node = ArgNode; Entry.Ty = V->getType();
8362 
8363     Entry.setAttributes(&CB, I - CB.arg_begin());
8364 
8365     // Use swifterror virtual register as input to the call.
8366     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
8367       SwiftErrorVal = V;
8368       // We find the virtual register for the actual swifterror argument.
8369       // Instead of using the Value, we use the virtual register instead.
8370       Entry.Node =
8371           DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
8372                           EVT(TLI.getPointerTy(DL)));
8373     }
8374 
8375     Args.push_back(Entry);
8376 
8377     // If we have an explicit sret argument that is an Instruction, (i.e., it
8378     // might point to function-local memory), we can't meaningfully tail-call.
8379     if (Entry.IsSRet && isa<Instruction>(V))
8380       isTailCall = false;
8381   }
8382 
8383   // If call site has a cfguardtarget operand bundle, create and add an
8384   // additional ArgListEntry.
8385   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
8386     TargetLowering::ArgListEntry Entry;
8387     Value *V = Bundle->Inputs[0];
8388     SDValue ArgNode = getValue(V);
8389     Entry.Node = ArgNode;
8390     Entry.Ty = V->getType();
8391     Entry.IsCFGuardTarget = true;
8392     Args.push_back(Entry);
8393   }
8394 
8395   // Check if target-independent constraints permit a tail call here.
8396   // Target-dependent constraints are checked within TLI->LowerCallTo.
8397   if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
8398     isTailCall = false;
8399 
8400   // Disable tail calls if there is an swifterror argument. Targets have not
8401   // been updated to support tail calls.
8402   if (TLI.supportSwiftError() && SwiftErrorVal)
8403     isTailCall = false;
8404 
8405   ConstantInt *CFIType = nullptr;
8406   if (CB.isIndirectCall()) {
8407     if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) {
8408       if (!TLI.supportKCFIBundles())
8409         report_fatal_error(
8410             "Target doesn't support calls with kcfi operand bundles.");
8411       CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8412       assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
8413     }
8414   }
8415 
8416   TargetLowering::CallLoweringInfo CLI(DAG);
8417   CLI.setDebugLoc(getCurSDLoc())
8418       .setChain(getRoot())
8419       .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
8420       .setTailCall(isTailCall)
8421       .setConvergent(CB.isConvergent())
8422       .setIsPreallocated(
8423           CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
8424       .setCFIType(CFIType);
8425   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8426 
8427   if (Result.first.getNode()) {
8428     Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
8429     setValue(&CB, Result.first);
8430   }
8431 
8432   // The last element of CLI.InVals has the SDValue for swifterror return.
8433   // Here we copy it to a virtual register and update SwiftErrorMap for
8434   // book-keeping.
8435   if (SwiftErrorVal && TLI.supportSwiftError()) {
8436     // Get the last element of InVals.
8437     SDValue Src = CLI.InVals.back();
8438     Register VReg =
8439         SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
8440     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
8441     DAG.setRoot(CopyNode);
8442   }
8443 }
8444 
8445 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
8446                              SelectionDAGBuilder &Builder) {
8447   // Check to see if this load can be trivially constant folded, e.g. if the
8448   // input is from a string literal.
8449   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
8450     // Cast pointer to the type we really want to load.
8451     Type *LoadTy =
8452         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
8453     if (LoadVT.isVector())
8454       LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
8455 
8456     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
8457                                          PointerType::getUnqual(LoadTy));
8458 
8459     if (const Constant *LoadCst =
8460             ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
8461                                          LoadTy, Builder.DAG.getDataLayout()))
8462       return Builder.getValue(LoadCst);
8463   }
8464 
8465   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
8466   // still constant memory, the input chain can be the entry node.
8467   SDValue Root;
8468   bool ConstantMemory = false;
8469 
8470   // Do not serialize (non-volatile) loads of constant memory with anything.
8471   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
8472     Root = Builder.DAG.getEntryNode();
8473     ConstantMemory = true;
8474   } else {
8475     // Do not serialize non-volatile loads against each other.
8476     Root = Builder.DAG.getRoot();
8477   }
8478 
8479   SDValue Ptr = Builder.getValue(PtrVal);
8480   SDValue LoadVal =
8481       Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
8482                           MachinePointerInfo(PtrVal), Align(1));
8483 
8484   if (!ConstantMemory)
8485     Builder.PendingLoads.push_back(LoadVal.getValue(1));
8486   return LoadVal;
8487 }
8488 
8489 /// Record the value for an instruction that produces an integer result,
8490 /// converting the type where necessary.
8491 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
8492                                                   SDValue Value,
8493                                                   bool IsSigned) {
8494   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8495                                                     I.getType(), true);
8496   Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT);
8497   setValue(&I, Value);
8498 }
8499 
8500 /// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
8501 /// true and lower it. Otherwise return false, and it will be lowered like a
8502 /// normal call.
8503 /// The caller already checked that \p I calls the appropriate LibFunc with a
8504 /// correct prototype.
8505 bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
8506   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
8507   const Value *Size = I.getArgOperand(2);
8508   const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size));
8509   if (CSize && CSize->getZExtValue() == 0) {
8510     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8511                                                           I.getType(), true);
8512     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
8513     return true;
8514   }
8515 
8516   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8517   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
8518       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
8519       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
8520   if (Res.first.getNode()) {
8521     processIntegerCallValue(I, Res.first, true);
8522     PendingLoads.push_back(Res.second);
8523     return true;
8524   }
8525 
8526   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
8527   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
8528   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
8529     return false;
8530 
8531   // If the target has a fast compare for the given size, it will return a
8532   // preferred load type for that size. Require that the load VT is legal and
8533   // that the target supports unaligned loads of that type. Otherwise, return
8534   // INVALID.
8535   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
8536     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8537     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
8538     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
8539       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
8540       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
8541       // TODO: Check alignment of src and dest ptrs.
8542       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
8543       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
8544       if (!TLI.isTypeLegal(LVT) ||
8545           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
8546           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
8547         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
8548     }
8549 
8550     return LVT;
8551   };
8552 
8553   // This turns into unaligned loads. We only do this if the target natively
8554   // supports the MVT we'll be loading or if it is small enough (<= 4) that
8555   // we'll only produce a small number of byte loads.
8556   MVT LoadVT;
8557   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
8558   switch (NumBitsToCompare) {
8559   default:
8560     return false;
8561   case 16:
8562     LoadVT = MVT::i16;
8563     break;
8564   case 32:
8565     LoadVT = MVT::i32;
8566     break;
8567   case 64:
8568   case 128:
8569   case 256:
8570     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
8571     break;
8572   }
8573 
8574   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
8575     return false;
8576 
8577   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
8578   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
8579 
8580   // Bitcast to a wide integer type if the loads are vectors.
8581   if (LoadVT.isVector()) {
8582     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
8583     LoadL = DAG.getBitcast(CmpVT, LoadL);
8584     LoadR = DAG.getBitcast(CmpVT, LoadR);
8585   }
8586 
8587   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
8588   processIntegerCallValue(I, Cmp, false);
8589   return true;
8590 }
8591 
8592 /// See if we can lower a memchr call into an optimized form. If so, return
8593 /// true and lower it. Otherwise return false, and it will be lowered like a
8594 /// normal call.
8595 /// The caller already checked that \p I calls the appropriate LibFunc with a
8596 /// correct prototype.
8597 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
8598   const Value *Src = I.getArgOperand(0);
8599   const Value *Char = I.getArgOperand(1);
8600   const Value *Length = I.getArgOperand(2);
8601 
8602   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8603   std::pair<SDValue, SDValue> Res =
8604     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
8605                                 getValue(Src), getValue(Char), getValue(Length),
8606                                 MachinePointerInfo(Src));
8607   if (Res.first.getNode()) {
8608     setValue(&I, Res.first);
8609     PendingLoads.push_back(Res.second);
8610     return true;
8611   }
8612 
8613   return false;
8614 }
8615 
8616 /// See if we can lower a mempcpy call into an optimized form. If so, return
8617 /// true and lower it. Otherwise return false, and it will be lowered like a
8618 /// normal call.
8619 /// The caller already checked that \p I calls the appropriate LibFunc with a
8620 /// correct prototype.
8621 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
8622   SDValue Dst = getValue(I.getArgOperand(0));
8623   SDValue Src = getValue(I.getArgOperand(1));
8624   SDValue Size = getValue(I.getArgOperand(2));
8625 
8626   Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
8627   Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
8628   // DAG::getMemcpy needs Alignment to be defined.
8629   Align Alignment = std::min(DstAlign, SrcAlign);
8630 
8631   SDLoc sdl = getCurSDLoc();
8632 
8633   // In the mempcpy context we need to pass in a false value for isTailCall
8634   // because the return pointer needs to be adjusted by the size of
8635   // the copied memory.
8636   SDValue Root = getMemoryRoot();
8637   SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, false, false,
8638                              /*isTailCall=*/false,
8639                              MachinePointerInfo(I.getArgOperand(0)),
8640                              MachinePointerInfo(I.getArgOperand(1)),
8641                              I.getAAMetadata());
8642   assert(MC.getNode() != nullptr &&
8643          "** memcpy should not be lowered as TailCall in mempcpy context **");
8644   DAG.setRoot(MC);
8645 
8646   // Check if Size needs to be truncated or extended.
8647   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
8648 
8649   // Adjust return pointer to point just past the last dst byte.
8650   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
8651                                     Dst, Size);
8652   setValue(&I, DstPlusSize);
8653   return true;
8654 }
8655 
8656 /// See if we can lower a strcpy call into an optimized form.  If so, return
8657 /// true and lower it, otherwise return false and it will be lowered like a
8658 /// normal call.
8659 /// The caller already checked that \p I calls the appropriate LibFunc with a
8660 /// correct prototype.
8661 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
8662   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8663 
8664   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8665   std::pair<SDValue, SDValue> Res =
8666     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
8667                                 getValue(Arg0), getValue(Arg1),
8668                                 MachinePointerInfo(Arg0),
8669                                 MachinePointerInfo(Arg1), isStpcpy);
8670   if (Res.first.getNode()) {
8671     setValue(&I, Res.first);
8672     DAG.setRoot(Res.second);
8673     return true;
8674   }
8675 
8676   return false;
8677 }
8678 
8679 /// See if we can lower a strcmp call into an optimized form.  If so, return
8680 /// true and lower it, otherwise return false and it will be lowered like a
8681 /// normal call.
8682 /// The caller already checked that \p I calls the appropriate LibFunc with a
8683 /// correct prototype.
8684 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
8685   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8686 
8687   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8688   std::pair<SDValue, SDValue> Res =
8689     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
8690                                 getValue(Arg0), getValue(Arg1),
8691                                 MachinePointerInfo(Arg0),
8692                                 MachinePointerInfo(Arg1));
8693   if (Res.first.getNode()) {
8694     processIntegerCallValue(I, Res.first, true);
8695     PendingLoads.push_back(Res.second);
8696     return true;
8697   }
8698 
8699   return false;
8700 }
8701 
8702 /// See if we can lower a strlen call into an optimized form.  If so, return
8703 /// true and lower it, otherwise return false and it will be lowered like a
8704 /// normal call.
8705 /// The caller already checked that \p I calls the appropriate LibFunc with a
8706 /// correct prototype.
8707 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
8708   const Value *Arg0 = I.getArgOperand(0);
8709 
8710   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8711   std::pair<SDValue, SDValue> Res =
8712     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
8713                                 getValue(Arg0), MachinePointerInfo(Arg0));
8714   if (Res.first.getNode()) {
8715     processIntegerCallValue(I, Res.first, false);
8716     PendingLoads.push_back(Res.second);
8717     return true;
8718   }
8719 
8720   return false;
8721 }
8722 
8723 /// See if we can lower a strnlen call into an optimized form.  If so, return
8724 /// true and lower it, otherwise return false and it will be lowered like a
8725 /// normal call.
8726 /// The caller already checked that \p I calls the appropriate LibFunc with a
8727 /// correct prototype.
8728 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
8729   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8730 
8731   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8732   std::pair<SDValue, SDValue> Res =
8733     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
8734                                  getValue(Arg0), getValue(Arg1),
8735                                  MachinePointerInfo(Arg0));
8736   if (Res.first.getNode()) {
8737     processIntegerCallValue(I, Res.first, false);
8738     PendingLoads.push_back(Res.second);
8739     return true;
8740   }
8741 
8742   return false;
8743 }
8744 
8745 /// See if we can lower a unary floating-point operation into an SDNode with
8746 /// the specified Opcode.  If so, return true and lower it, otherwise return
8747 /// false and it will be lowered like a normal call.
8748 /// The caller already checked that \p I calls the appropriate LibFunc with a
8749 /// correct prototype.
8750 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
8751                                               unsigned Opcode) {
8752   // We already checked this call's prototype; verify it doesn't modify errno.
8753   if (!I.onlyReadsMemory())
8754     return false;
8755 
8756   SDNodeFlags Flags;
8757   Flags.copyFMF(cast<FPMathOperator>(I));
8758 
8759   SDValue Tmp = getValue(I.getArgOperand(0));
8760   setValue(&I,
8761            DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
8762   return true;
8763 }
8764 
8765 /// See if we can lower a binary floating-point operation into an SDNode with
8766 /// the specified Opcode. If so, return true and lower it. Otherwise return
8767 /// false, and it will be lowered like a normal call.
8768 /// The caller already checked that \p I calls the appropriate LibFunc with a
8769 /// correct prototype.
8770 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
8771                                                unsigned Opcode) {
8772   // We already checked this call's prototype; verify it doesn't modify errno.
8773   if (!I.onlyReadsMemory())
8774     return false;
8775 
8776   SDNodeFlags Flags;
8777   Flags.copyFMF(cast<FPMathOperator>(I));
8778 
8779   SDValue Tmp0 = getValue(I.getArgOperand(0));
8780   SDValue Tmp1 = getValue(I.getArgOperand(1));
8781   EVT VT = Tmp0.getValueType();
8782   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
8783   return true;
8784 }
8785 
8786 void SelectionDAGBuilder::visitCall(const CallInst &I) {
8787   // Handle inline assembly differently.
8788   if (I.isInlineAsm()) {
8789     visitInlineAsm(I);
8790     return;
8791   }
8792 
8793   diagnoseDontCall(I);
8794 
8795   if (Function *F = I.getCalledFunction()) {
8796     if (F->isDeclaration()) {
8797       // Is this an LLVM intrinsic or a target-specific intrinsic?
8798       unsigned IID = F->getIntrinsicID();
8799       if (!IID)
8800         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
8801           IID = II->getIntrinsicID(F);
8802 
8803       if (IID) {
8804         visitIntrinsicCall(I, IID);
8805         return;
8806       }
8807     }
8808 
8809     // Check for well-known libc/libm calls.  If the function is internal, it
8810     // can't be a library call.  Don't do the check if marked as nobuiltin for
8811     // some reason or the call site requires strict floating point semantics.
8812     LibFunc Func;
8813     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
8814         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
8815         LibInfo->hasOptimizedCodeGen(Func)) {
8816       switch (Func) {
8817       default: break;
8818       case LibFunc_bcmp:
8819         if (visitMemCmpBCmpCall(I))
8820           return;
8821         break;
8822       case LibFunc_copysign:
8823       case LibFunc_copysignf:
8824       case LibFunc_copysignl:
8825         // We already checked this call's prototype; verify it doesn't modify
8826         // errno.
8827         if (I.onlyReadsMemory()) {
8828           SDValue LHS = getValue(I.getArgOperand(0));
8829           SDValue RHS = getValue(I.getArgOperand(1));
8830           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
8831                                    LHS.getValueType(), LHS, RHS));
8832           return;
8833         }
8834         break;
8835       case LibFunc_fabs:
8836       case LibFunc_fabsf:
8837       case LibFunc_fabsl:
8838         if (visitUnaryFloatCall(I, ISD::FABS))
8839           return;
8840         break;
8841       case LibFunc_fmin:
8842       case LibFunc_fminf:
8843       case LibFunc_fminl:
8844         if (visitBinaryFloatCall(I, ISD::FMINNUM))
8845           return;
8846         break;
8847       case LibFunc_fmax:
8848       case LibFunc_fmaxf:
8849       case LibFunc_fmaxl:
8850         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
8851           return;
8852         break;
8853       case LibFunc_sin:
8854       case LibFunc_sinf:
8855       case LibFunc_sinl:
8856         if (visitUnaryFloatCall(I, ISD::FSIN))
8857           return;
8858         break;
8859       case LibFunc_cos:
8860       case LibFunc_cosf:
8861       case LibFunc_cosl:
8862         if (visitUnaryFloatCall(I, ISD::FCOS))
8863           return;
8864         break;
8865       case LibFunc_sqrt:
8866       case LibFunc_sqrtf:
8867       case LibFunc_sqrtl:
8868       case LibFunc_sqrt_finite:
8869       case LibFunc_sqrtf_finite:
8870       case LibFunc_sqrtl_finite:
8871         if (visitUnaryFloatCall(I, ISD::FSQRT))
8872           return;
8873         break;
8874       case LibFunc_floor:
8875       case LibFunc_floorf:
8876       case LibFunc_floorl:
8877         if (visitUnaryFloatCall(I, ISD::FFLOOR))
8878           return;
8879         break;
8880       case LibFunc_nearbyint:
8881       case LibFunc_nearbyintf:
8882       case LibFunc_nearbyintl:
8883         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
8884           return;
8885         break;
8886       case LibFunc_ceil:
8887       case LibFunc_ceilf:
8888       case LibFunc_ceill:
8889         if (visitUnaryFloatCall(I, ISD::FCEIL))
8890           return;
8891         break;
8892       case LibFunc_rint:
8893       case LibFunc_rintf:
8894       case LibFunc_rintl:
8895         if (visitUnaryFloatCall(I, ISD::FRINT))
8896           return;
8897         break;
8898       case LibFunc_round:
8899       case LibFunc_roundf:
8900       case LibFunc_roundl:
8901         if (visitUnaryFloatCall(I, ISD::FROUND))
8902           return;
8903         break;
8904       case LibFunc_trunc:
8905       case LibFunc_truncf:
8906       case LibFunc_truncl:
8907         if (visitUnaryFloatCall(I, ISD::FTRUNC))
8908           return;
8909         break;
8910       case LibFunc_log2:
8911       case LibFunc_log2f:
8912       case LibFunc_log2l:
8913         if (visitUnaryFloatCall(I, ISD::FLOG2))
8914           return;
8915         break;
8916       case LibFunc_exp2:
8917       case LibFunc_exp2f:
8918       case LibFunc_exp2l:
8919         if (visitUnaryFloatCall(I, ISD::FEXP2))
8920           return;
8921         break;
8922       case LibFunc_exp10:
8923       case LibFunc_exp10f:
8924       case LibFunc_exp10l:
8925         if (visitUnaryFloatCall(I, ISD::FEXP10))
8926           return;
8927         break;
8928       case LibFunc_ldexp:
8929       case LibFunc_ldexpf:
8930       case LibFunc_ldexpl:
8931         if (visitBinaryFloatCall(I, ISD::FLDEXP))
8932           return;
8933         break;
8934       case LibFunc_memcmp:
8935         if (visitMemCmpBCmpCall(I))
8936           return;
8937         break;
8938       case LibFunc_mempcpy:
8939         if (visitMemPCpyCall(I))
8940           return;
8941         break;
8942       case LibFunc_memchr:
8943         if (visitMemChrCall(I))
8944           return;
8945         break;
8946       case LibFunc_strcpy:
8947         if (visitStrCpyCall(I, false))
8948           return;
8949         break;
8950       case LibFunc_stpcpy:
8951         if (visitStrCpyCall(I, true))
8952           return;
8953         break;
8954       case LibFunc_strcmp:
8955         if (visitStrCmpCall(I))
8956           return;
8957         break;
8958       case LibFunc_strlen:
8959         if (visitStrLenCall(I))
8960           return;
8961         break;
8962       case LibFunc_strnlen:
8963         if (visitStrNLenCall(I))
8964           return;
8965         break;
8966       }
8967     }
8968   }
8969 
8970   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
8971   // have to do anything here to lower funclet bundles.
8972   // CFGuardTarget bundles are lowered in LowerCallTo.
8973   assert(!I.hasOperandBundlesOtherThan(
8974              {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
8975               LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
8976               LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi}) &&
8977          "Cannot lower calls with arbitrary operand bundles!");
8978 
8979   SDValue Callee = getValue(I.getCalledOperand());
8980 
8981   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
8982     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
8983   else
8984     // Check if we can potentially perform a tail call. More detailed checking
8985     // is be done within LowerCallTo, after more information about the call is
8986     // known.
8987     LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
8988 }
8989 
8990 namespace {
8991 
8992 /// AsmOperandInfo - This contains information for each constraint that we are
8993 /// lowering.
8994 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
8995 public:
8996   /// CallOperand - If this is the result output operand or a clobber
8997   /// this is null, otherwise it is the incoming operand to the CallInst.
8998   /// This gets modified as the asm is processed.
8999   SDValue CallOperand;
9000 
9001   /// AssignedRegs - If this is a register or register class operand, this
9002   /// contains the set of register corresponding to the operand.
9003   RegsForValue AssignedRegs;
9004 
9005   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
9006     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
9007   }
9008 
9009   /// Whether or not this operand accesses memory
9010   bool hasMemory(const TargetLowering &TLI) const {
9011     // Indirect operand accesses access memory.
9012     if (isIndirect)
9013       return true;
9014 
9015     for (const auto &Code : Codes)
9016       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
9017         return true;
9018 
9019     return false;
9020   }
9021 };
9022 
9023 
9024 } // end anonymous namespace
9025 
9026 /// Make sure that the output operand \p OpInfo and its corresponding input
9027 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
9028 /// out).
9029 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
9030                                SDISelAsmOperandInfo &MatchingOpInfo,
9031                                SelectionDAG &DAG) {
9032   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9033     return;
9034 
9035   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
9036   const auto &TLI = DAG.getTargetLoweringInfo();
9037 
9038   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9039       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
9040                                        OpInfo.ConstraintVT);
9041   std::pair<unsigned, const TargetRegisterClass *> InputRC =
9042       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
9043                                        MatchingOpInfo.ConstraintVT);
9044   if ((OpInfo.ConstraintVT.isInteger() !=
9045        MatchingOpInfo.ConstraintVT.isInteger()) ||
9046       (MatchRC.second != InputRC.second)) {
9047     // FIXME: error out in a more elegant fashion
9048     report_fatal_error("Unsupported asm: input constraint"
9049                        " with a matching output constraint of"
9050                        " incompatible type!");
9051   }
9052   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9053 }
9054 
9055 /// Get a direct memory input to behave well as an indirect operand.
9056 /// This may introduce stores, hence the need for a \p Chain.
9057 /// \return The (possibly updated) chain.
9058 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
9059                                         SDISelAsmOperandInfo &OpInfo,
9060                                         SelectionDAG &DAG) {
9061   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9062 
9063   // If we don't have an indirect input, put it in the constpool if we can,
9064   // otherwise spill it to a stack slot.
9065   // TODO: This isn't quite right. We need to handle these according to
9066   // the addressing mode that the constraint wants. Also, this may take
9067   // an additional register for the computation and we don't want that
9068   // either.
9069 
9070   // If the operand is a float, integer, or vector constant, spill to a
9071   // constant pool entry to get its address.
9072   const Value *OpVal = OpInfo.CallOperandVal;
9073   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9074       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9075     OpInfo.CallOperand = DAG.getConstantPool(
9076         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
9077     return Chain;
9078   }
9079 
9080   // Otherwise, create a stack slot and emit a store to it before the asm.
9081   Type *Ty = OpVal->getType();
9082   auto &DL = DAG.getDataLayout();
9083   uint64_t TySize = DL.getTypeAllocSize(Ty);
9084   MachineFunction &MF = DAG.getMachineFunction();
9085   int SSFI = MF.getFrameInfo().CreateStackObject(
9086       TySize, DL.getPrefTypeAlign(Ty), false);
9087   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
9088   Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9089                             MachinePointerInfo::getFixedStack(MF, SSFI),
9090                             TLI.getMemValueType(DL, Ty));
9091   OpInfo.CallOperand = StackSlot;
9092 
9093   return Chain;
9094 }
9095 
9096 /// GetRegistersForValue - Assign registers (virtual or physical) for the
9097 /// specified operand.  We prefer to assign virtual registers, to allow the
9098 /// register allocator to handle the assignment process.  However, if the asm
9099 /// uses features that we can't model on machineinstrs, we have SDISel do the
9100 /// allocation.  This produces generally horrible, but correct, code.
9101 ///
9102 ///   OpInfo describes the operand
9103 ///   RefOpInfo describes the matching operand if any, the operand otherwise
9104 static std::optional<unsigned>
9105 getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
9106                      SDISelAsmOperandInfo &OpInfo,
9107                      SDISelAsmOperandInfo &RefOpInfo) {
9108   LLVMContext &Context = *DAG.getContext();
9109   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9110 
9111   MachineFunction &MF = DAG.getMachineFunction();
9112   SmallVector<unsigned, 4> Regs;
9113   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9114 
9115   // No work to do for memory/address operands.
9116   if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9117       OpInfo.ConstraintType == TargetLowering::C_Address)
9118     return std::nullopt;
9119 
9120   // If this is a constraint for a single physreg, or a constraint for a
9121   // register class, find it.
9122   unsigned AssignedReg;
9123   const TargetRegisterClass *RC;
9124   std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
9125       &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9126   // RC is unset only on failure. Return immediately.
9127   if (!RC)
9128     return std::nullopt;
9129 
9130   // Get the actual register value type.  This is important, because the user
9131   // may have asked for (e.g.) the AX register in i32 type.  We need to
9132   // remember that AX is actually i16 to get the right extension.
9133   const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
9134 
9135   if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9136     // If this is an FP operand in an integer register (or visa versa), or more
9137     // generally if the operand value disagrees with the register class we plan
9138     // to stick it in, fix the operand type.
9139     //
9140     // If this is an input value, the bitcast to the new type is done now.
9141     // Bitcast for output value is done at the end of visitInlineAsm().
9142     if ((OpInfo.Type == InlineAsm::isOutput ||
9143          OpInfo.Type == InlineAsm::isInput) &&
9144         !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9145       // Try to convert to the first EVT that the reg class contains.  If the
9146       // types are identical size, use a bitcast to convert (e.g. two differing
9147       // vector types).  Note: output bitcast is done at the end of
9148       // visitInlineAsm().
9149       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9150         // Exclude indirect inputs while they are unsupported because the code
9151         // to perform the load is missing and thus OpInfo.CallOperand still
9152         // refers to the input address rather than the pointed-to value.
9153         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
9154           OpInfo.CallOperand =
9155               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
9156         OpInfo.ConstraintVT = RegVT;
9157         // If the operand is an FP value and we want it in integer registers,
9158         // use the corresponding integer type. This turns an f64 value into
9159         // i64, which can be passed with two i32 values on a 32-bit machine.
9160       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9161         MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
9162         if (OpInfo.Type == InlineAsm::isInput)
9163           OpInfo.CallOperand =
9164               DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
9165         OpInfo.ConstraintVT = VT;
9166       }
9167     }
9168   }
9169 
9170   // No need to allocate a matching input constraint since the constraint it's
9171   // matching to has already been allocated.
9172   if (OpInfo.isMatchingInputConstraint())
9173     return std::nullopt;
9174 
9175   EVT ValueVT = OpInfo.ConstraintVT;
9176   if (OpInfo.ConstraintVT == MVT::Other)
9177     ValueVT = RegVT;
9178 
9179   // Initialize NumRegs.
9180   unsigned NumRegs = 1;
9181   if (OpInfo.ConstraintVT != MVT::Other)
9182     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
9183 
9184   // If this is a constraint for a specific physical register, like {r17},
9185   // assign it now.
9186 
9187   // If this associated to a specific register, initialize iterator to correct
9188   // place. If virtual, make sure we have enough registers
9189 
9190   // Initialize iterator if necessary
9191   TargetRegisterClass::iterator I = RC->begin();
9192   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9193 
9194   // Do not check for single registers.
9195   if (AssignedReg) {
9196     I = std::find(I, RC->end(), AssignedReg);
9197     if (I == RC->end()) {
9198       // RC does not contain the selected register, which indicates a
9199       // mismatch between the register and the required type/bitwidth.
9200       return {AssignedReg};
9201     }
9202   }
9203 
9204   for (; NumRegs; --NumRegs, ++I) {
9205     assert(I != RC->end() && "Ran out of registers to allocate!");
9206     Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
9207     Regs.push_back(R);
9208   }
9209 
9210   OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
9211   return std::nullopt;
9212 }
9213 
9214 static unsigned
9215 findMatchingInlineAsmOperand(unsigned OperandNo,
9216                              const std::vector<SDValue> &AsmNodeOperands) {
9217   // Scan until we find the definition we already emitted of this operand.
9218   unsigned CurOp = InlineAsm::Op_FirstOperand;
9219   for (; OperandNo; --OperandNo) {
9220     // Advance to the next operand.
9221     unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9222     const InlineAsm::Flag F(OpFlag);
9223     assert(
9224         (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
9225         "Skipped past definitions?");
9226     CurOp += F.getNumOperandRegisters() + 1;
9227   }
9228   return CurOp;
9229 }
9230 
9231 namespace {
9232 
9233 class ExtraFlags {
9234   unsigned Flags = 0;
9235 
9236 public:
9237   explicit ExtraFlags(const CallBase &Call) {
9238     const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9239     if (IA->hasSideEffects())
9240       Flags |= InlineAsm::Extra_HasSideEffects;
9241     if (IA->isAlignStack())
9242       Flags |= InlineAsm::Extra_IsAlignStack;
9243     if (Call.isConvergent())
9244       Flags |= InlineAsm::Extra_IsConvergent;
9245     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
9246   }
9247 
9248   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
9249     // Ideally, we would only check against memory constraints.  However, the
9250     // meaning of an Other constraint can be target-specific and we can't easily
9251     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
9252     // for Other constraints as well.
9253     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9254         OpInfo.ConstraintType == TargetLowering::C_Other) {
9255       if (OpInfo.Type == InlineAsm::isInput)
9256         Flags |= InlineAsm::Extra_MayLoad;
9257       else if (OpInfo.Type == InlineAsm::isOutput)
9258         Flags |= InlineAsm::Extra_MayStore;
9259       else if (OpInfo.Type == InlineAsm::isClobber)
9260         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
9261     }
9262   }
9263 
9264   unsigned get() const { return Flags; }
9265 };
9266 
9267 } // end anonymous namespace
9268 
9269 static bool isFunction(SDValue Op) {
9270   if (Op && Op.getOpcode() == ISD::GlobalAddress) {
9271     if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9272       auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9273 
9274       // In normal "call dllimport func" instruction (non-inlineasm) it force
9275       // indirect access by specifing call opcode. And usually specially print
9276       // asm with indirect symbol (i.g: "*") according to opcode. Inline asm can
9277       // not do in this way now. (In fact, this is similar with "Data Access"
9278       // action). So here we ignore dllimport function.
9279       if (Fn && !Fn->hasDLLImportStorageClass())
9280         return true;
9281     }
9282   }
9283   return false;
9284 }
9285 
9286 /// visitInlineAsm - Handle a call to an InlineAsm object.
9287 void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
9288                                          const BasicBlock *EHPadBB) {
9289   const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9290 
9291   /// ConstraintOperands - Information about all of the constraints.
9292   SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
9293 
9294   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9295   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
9296       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
9297 
9298   // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
9299   // AsmDialect, MayLoad, MayStore).
9300   bool HasSideEffect = IA->hasSideEffects();
9301   ExtraFlags ExtraInfo(Call);
9302 
9303   for (auto &T : TargetConstraints) {
9304     ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
9305     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
9306 
9307     if (OpInfo.CallOperandVal)
9308       OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
9309 
9310     if (!HasSideEffect)
9311       HasSideEffect = OpInfo.hasMemory(TLI);
9312 
9313     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
9314     // FIXME: Could we compute this on OpInfo rather than T?
9315 
9316     // Compute the constraint code and ConstraintType to use.
9317     TLI.ComputeConstraintToUse(T, SDValue());
9318 
9319     if (T.ConstraintType == TargetLowering::C_Immediate &&
9320         OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
9321       // We've delayed emitting a diagnostic like the "n" constraint because
9322       // inlining could cause an integer showing up.
9323       return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
9324                                           "' expects an integer constant "
9325                                           "expression");
9326 
9327     ExtraInfo.update(T);
9328   }
9329 
9330   // We won't need to flush pending loads if this asm doesn't touch
9331   // memory and is nonvolatile.
9332   SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
9333 
9334   bool EmitEHLabels = isa<InvokeInst>(Call);
9335   if (EmitEHLabels) {
9336     assert(EHPadBB && "InvokeInst must have an EHPadBB");
9337   }
9338   bool IsCallBr = isa<CallBrInst>(Call);
9339 
9340   if (IsCallBr || EmitEHLabels) {
9341     // If this is a callbr or invoke we need to flush pending exports since
9342     // inlineasm_br and invoke are terminators.
9343     // We need to do this before nodes are glued to the inlineasm_br node.
9344     Chain = getControlRoot();
9345   }
9346 
9347   MCSymbol *BeginLabel = nullptr;
9348   if (EmitEHLabels) {
9349     Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
9350   }
9351 
9352   int OpNo = -1;
9353   SmallVector<StringRef> AsmStrs;
9354   IA->collectAsmStrs(AsmStrs);
9355 
9356   // Second pass over the constraints: compute which constraint option to use.
9357   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9358     if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
9359       OpNo++;
9360 
9361     // If this is an output operand with a matching input operand, look up the
9362     // matching input. If their types mismatch, e.g. one is an integer, the
9363     // other is floating point, or their sizes are different, flag it as an
9364     // error.
9365     if (OpInfo.hasMatchingInput()) {
9366       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
9367       patchMatchingInput(OpInfo, Input, DAG);
9368     }
9369 
9370     // Compute the constraint code and ConstraintType to use.
9371     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
9372 
9373     if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
9374          OpInfo.Type == InlineAsm::isClobber) ||
9375         OpInfo.ConstraintType == TargetLowering::C_Address)
9376       continue;
9377 
9378     // In Linux PIC model, there are 4 cases about value/label addressing:
9379     //
9380     // 1: Function call or Label jmp inside the module.
9381     // 2: Data access (such as global variable, static variable) inside module.
9382     // 3: Function call or Label jmp outside the module.
9383     // 4: Data access (such as global variable) outside the module.
9384     //
9385     // Due to current llvm inline asm architecture designed to not "recognize"
9386     // the asm code, there are quite troubles for us to treat mem addressing
9387     // differently for same value/adress used in different instuctions.
9388     // For example, in pic model, call a func may in plt way or direclty
9389     // pc-related, but lea/mov a function adress may use got.
9390     //
9391     // Here we try to "recognize" function call for the case 1 and case 3 in
9392     // inline asm. And try to adjust the constraint for them.
9393     //
9394     // TODO: Due to current inline asm didn't encourage to jmp to the outsider
9395     // label, so here we don't handle jmp function label now, but we need to
9396     // enhance it (especilly in PIC model) if we meet meaningful requirements.
9397     if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
9398         TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
9399         TM.getCodeModel() != CodeModel::Large) {
9400       OpInfo.isIndirect = false;
9401       OpInfo.ConstraintType = TargetLowering::C_Address;
9402     }
9403 
9404     // If this is a memory input, and if the operand is not indirect, do what we
9405     // need to provide an address for the memory input.
9406     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
9407         !OpInfo.isIndirect) {
9408       assert((OpInfo.isMultipleAlternative ||
9409               (OpInfo.Type == InlineAsm::isInput)) &&
9410              "Can only indirectify direct input operands!");
9411 
9412       // Memory operands really want the address of the value.
9413       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
9414 
9415       // There is no longer a Value* corresponding to this operand.
9416       OpInfo.CallOperandVal = nullptr;
9417 
9418       // It is now an indirect operand.
9419       OpInfo.isIndirect = true;
9420     }
9421 
9422   }
9423 
9424   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
9425   std::vector<SDValue> AsmNodeOperands;
9426   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
9427   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
9428       IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
9429 
9430   // If we have a !srcloc metadata node associated with it, we want to attach
9431   // this to the ultimately generated inline asm machineinstr.  To do this, we
9432   // pass in the third operand as this (potentially null) inline asm MDNode.
9433   const MDNode *SrcLoc = Call.getMetadata("srcloc");
9434   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
9435 
9436   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
9437   // bits as operand 3.
9438   AsmNodeOperands.push_back(DAG.getTargetConstant(
9439       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9440 
9441   // Third pass: Loop over operands to prepare DAG-level operands.. As part of
9442   // this, assign virtual and physical registers for inputs and otput.
9443   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9444     // Assign Registers.
9445     SDISelAsmOperandInfo &RefOpInfo =
9446         OpInfo.isMatchingInputConstraint()
9447             ? ConstraintOperands[OpInfo.getMatchedOperand()]
9448             : OpInfo;
9449     const auto RegError =
9450         getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
9451     if (RegError) {
9452       const MachineFunction &MF = DAG.getMachineFunction();
9453       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9454       const char *RegName = TRI.getName(*RegError);
9455       emitInlineAsmError(Call, "register '" + Twine(RegName) +
9456                                    "' allocated for constraint '" +
9457                                    Twine(OpInfo.ConstraintCode) +
9458                                    "' does not match required type");
9459       return;
9460     }
9461 
9462     auto DetectWriteToReservedRegister = [&]() {
9463       const MachineFunction &MF = DAG.getMachineFunction();
9464       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9465       for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
9466         if (Register::isPhysicalRegister(Reg) &&
9467             TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
9468           const char *RegName = TRI.getName(Reg);
9469           emitInlineAsmError(Call, "write to reserved register '" +
9470                                        Twine(RegName) + "'");
9471           return true;
9472         }
9473       }
9474       return false;
9475     };
9476     assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
9477             (OpInfo.Type == InlineAsm::isInput &&
9478              !OpInfo.isMatchingInputConstraint())) &&
9479            "Only address as input operand is allowed.");
9480 
9481     switch (OpInfo.Type) {
9482     case InlineAsm::isOutput:
9483       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9484         const InlineAsm::ConstraintCode ConstraintID =
9485             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9486         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9487                "Failed to convert memory constraint code to constraint id.");
9488 
9489         // Add information to the INLINEASM node to know about this output.
9490         InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
9491         OpFlags.setMemConstraint(ConstraintID);
9492         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
9493                                                         MVT::i32));
9494         AsmNodeOperands.push_back(OpInfo.CallOperand);
9495       } else {
9496         // Otherwise, this outputs to a register (directly for C_Register /
9497         // C_RegisterClass, and a target-defined fashion for
9498         // C_Immediate/C_Other). Find a register that we can use.
9499         if (OpInfo.AssignedRegs.Regs.empty()) {
9500           emitInlineAsmError(
9501               Call, "couldn't allocate output register for constraint '" +
9502                         Twine(OpInfo.ConstraintCode) + "'");
9503           return;
9504         }
9505 
9506         if (DetectWriteToReservedRegister())
9507           return;
9508 
9509         // Add information to the INLINEASM node to know that this register is
9510         // set.
9511         OpInfo.AssignedRegs.AddInlineAsmOperands(
9512             OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
9513                                   : InlineAsm::Kind::RegDef,
9514             false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
9515       }
9516       break;
9517 
9518     case InlineAsm::isInput:
9519     case InlineAsm::isLabel: {
9520       SDValue InOperandVal = OpInfo.CallOperand;
9521 
9522       if (OpInfo.isMatchingInputConstraint()) {
9523         // If this is required to match an output register we have already set,
9524         // just use its register.
9525         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
9526                                                   AsmNodeOperands);
9527         InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
9528         if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
9529           if (OpInfo.isIndirect) {
9530             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
9531             emitInlineAsmError(Call, "inline asm not supported yet: "
9532                                      "don't know how to handle tied "
9533                                      "indirect register inputs");
9534             return;
9535           }
9536 
9537           SmallVector<unsigned, 4> Regs;
9538           MachineFunction &MF = DAG.getMachineFunction();
9539           MachineRegisterInfo &MRI = MF.getRegInfo();
9540           const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9541           auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
9542           Register TiedReg = R->getReg();
9543           MVT RegVT = R->getSimpleValueType(0);
9544           const TargetRegisterClass *RC =
9545               TiedReg.isVirtual()     ? MRI.getRegClass(TiedReg)
9546               : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
9547                                       : TRI.getMinimalPhysRegClass(TiedReg);
9548           for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i)
9549             Regs.push_back(MRI.createVirtualRegister(RC));
9550 
9551           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
9552 
9553           SDLoc dl = getCurSDLoc();
9554           // Use the produced MatchedRegs object to
9555           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, &Call);
9556           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, true,
9557                                            OpInfo.getMatchedOperand(), dl, DAG,
9558                                            AsmNodeOperands);
9559           break;
9560         }
9561 
9562         assert(Flag.isMemKind() && "Unknown matching constraint!");
9563         assert(Flag.getNumOperandRegisters() == 1 &&
9564                "Unexpected number of operands");
9565         // Add information to the INLINEASM node to know about this input.
9566         // See InlineAsm.h isUseOperandTiedToDef.
9567         Flag.clearMemConstraint();
9568         Flag.setMatchingOp(OpInfo.getMatchedOperand());
9569         AsmNodeOperands.push_back(DAG.getTargetConstant(
9570             Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9571         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
9572         break;
9573       }
9574 
9575       // Treat indirect 'X' constraint as memory.
9576       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
9577           OpInfo.isIndirect)
9578         OpInfo.ConstraintType = TargetLowering::C_Memory;
9579 
9580       if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
9581           OpInfo.ConstraintType == TargetLowering::C_Other) {
9582         std::vector<SDValue> Ops;
9583         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
9584                                           Ops, DAG);
9585         if (Ops.empty()) {
9586           if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
9587             if (isa<ConstantSDNode>(InOperandVal)) {
9588               emitInlineAsmError(Call, "value out of range for constraint '" +
9589                                            Twine(OpInfo.ConstraintCode) + "'");
9590               return;
9591             }
9592 
9593           emitInlineAsmError(Call,
9594                              "invalid operand for inline asm constraint '" +
9595                                  Twine(OpInfo.ConstraintCode) + "'");
9596           return;
9597         }
9598 
9599         // Add information to the INLINEASM node to know about this input.
9600         InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size());
9601         AsmNodeOperands.push_back(DAG.getTargetConstant(
9602             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9603         llvm::append_range(AsmNodeOperands, Ops);
9604         break;
9605       }
9606 
9607       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9608         assert((OpInfo.isIndirect ||
9609                 OpInfo.ConstraintType != TargetLowering::C_Memory) &&
9610                "Operand must be indirect to be a mem!");
9611         assert(InOperandVal.getValueType() ==
9612                    TLI.getPointerTy(DAG.getDataLayout()) &&
9613                "Memory operands expect pointer values");
9614 
9615         const InlineAsm::ConstraintCode ConstraintID =
9616             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9617         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9618                "Failed to convert memory constraint code to constraint id.");
9619 
9620         // Add information to the INLINEASM node to know about this input.
9621         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9622         ResOpType.setMemConstraint(ConstraintID);
9623         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
9624                                                         getCurSDLoc(),
9625                                                         MVT::i32));
9626         AsmNodeOperands.push_back(InOperandVal);
9627         break;
9628       }
9629 
9630       if (OpInfo.ConstraintType == TargetLowering::C_Address) {
9631         const InlineAsm::ConstraintCode ConstraintID =
9632             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9633         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9634                "Failed to convert memory constraint code to constraint id.");
9635 
9636         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9637 
9638         SDValue AsmOp = InOperandVal;
9639         if (isFunction(InOperandVal)) {
9640           auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
9641           ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1);
9642           AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
9643                                              InOperandVal.getValueType(),
9644                                              GA->getOffset());
9645         }
9646 
9647         // Add information to the INLINEASM node to know about this input.
9648         ResOpType.setMemConstraint(ConstraintID);
9649 
9650         AsmNodeOperands.push_back(
9651             DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32));
9652 
9653         AsmNodeOperands.push_back(AsmOp);
9654         break;
9655       }
9656 
9657       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
9658               OpInfo.ConstraintType == TargetLowering::C_Register) &&
9659              "Unknown constraint type!");
9660 
9661       // TODO: Support this.
9662       if (OpInfo.isIndirect) {
9663         emitInlineAsmError(
9664             Call, "Don't know how to handle indirect register inputs yet "
9665                   "for constraint '" +
9666                       Twine(OpInfo.ConstraintCode) + "'");
9667         return;
9668       }
9669 
9670       // Copy the input into the appropriate registers.
9671       if (OpInfo.AssignedRegs.Regs.empty()) {
9672         emitInlineAsmError(Call,
9673                            "couldn't allocate input reg for constraint '" +
9674                                Twine(OpInfo.ConstraintCode) + "'");
9675         return;
9676       }
9677 
9678       if (DetectWriteToReservedRegister())
9679         return;
9680 
9681       SDLoc dl = getCurSDLoc();
9682 
9683       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue,
9684                                         &Call);
9685 
9686       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, false,
9687                                                0, dl, DAG, AsmNodeOperands);
9688       break;
9689     }
9690     case InlineAsm::isClobber:
9691       // Add the clobbered value to the operand list, so that the register
9692       // allocator is aware that the physreg got clobbered.
9693       if (!OpInfo.AssignedRegs.Regs.empty())
9694         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::Clobber,
9695                                                  false, 0, getCurSDLoc(), DAG,
9696                                                  AsmNodeOperands);
9697       break;
9698     }
9699   }
9700 
9701   // Finish up input operands.  Set the input chain and add the flag last.
9702   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
9703   if (Glue.getNode()) AsmNodeOperands.push_back(Glue);
9704 
9705   unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
9706   Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
9707                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
9708   Glue = Chain.getValue(1);
9709 
9710   // Do additional work to generate outputs.
9711 
9712   SmallVector<EVT, 1> ResultVTs;
9713   SmallVector<SDValue, 1> ResultValues;
9714   SmallVector<SDValue, 8> OutChains;
9715 
9716   llvm::Type *CallResultType = Call.getType();
9717   ArrayRef<Type *> ResultTypes;
9718   if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
9719     ResultTypes = StructResult->elements();
9720   else if (!CallResultType->isVoidTy())
9721     ResultTypes = ArrayRef(CallResultType);
9722 
9723   auto CurResultType = ResultTypes.begin();
9724   auto handleRegAssign = [&](SDValue V) {
9725     assert(CurResultType != ResultTypes.end() && "Unexpected value");
9726     assert((*CurResultType)->isSized() && "Unexpected unsized type");
9727     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
9728     ++CurResultType;
9729     // If the type of the inline asm call site return value is different but has
9730     // same size as the type of the asm output bitcast it.  One example of this
9731     // is for vectors with different width / number of elements.  This can
9732     // happen for register classes that can contain multiple different value
9733     // types.  The preg or vreg allocated may not have the same VT as was
9734     // expected.
9735     //
9736     // This can also happen for a return value that disagrees with the register
9737     // class it is put in, eg. a double in a general-purpose register on a
9738     // 32-bit machine.
9739     if (ResultVT != V.getValueType() &&
9740         ResultVT.getSizeInBits() == V.getValueSizeInBits())
9741       V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
9742     else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
9743              V.getValueType().isInteger()) {
9744       // If a result value was tied to an input value, the computed result
9745       // may have a wider width than the expected result.  Extract the
9746       // relevant portion.
9747       V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
9748     }
9749     assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
9750     ResultVTs.push_back(ResultVT);
9751     ResultValues.push_back(V);
9752   };
9753 
9754   // Deal with output operands.
9755   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9756     if (OpInfo.Type == InlineAsm::isOutput) {
9757       SDValue Val;
9758       // Skip trivial output operands.
9759       if (OpInfo.AssignedRegs.Regs.empty())
9760         continue;
9761 
9762       switch (OpInfo.ConstraintType) {
9763       case TargetLowering::C_Register:
9764       case TargetLowering::C_RegisterClass:
9765         Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
9766                                                   Chain, &Glue, &Call);
9767         break;
9768       case TargetLowering::C_Immediate:
9769       case TargetLowering::C_Other:
9770         Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
9771                                               OpInfo, DAG);
9772         break;
9773       case TargetLowering::C_Memory:
9774         break; // Already handled.
9775       case TargetLowering::C_Address:
9776         break; // Silence warning.
9777       case TargetLowering::C_Unknown:
9778         assert(false && "Unexpected unknown constraint");
9779       }
9780 
9781       // Indirect output manifest as stores. Record output chains.
9782       if (OpInfo.isIndirect) {
9783         const Value *Ptr = OpInfo.CallOperandVal;
9784         assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
9785         SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
9786                                      MachinePointerInfo(Ptr));
9787         OutChains.push_back(Store);
9788       } else {
9789         // generate CopyFromRegs to associated registers.
9790         assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
9791         if (Val.getOpcode() == ISD::MERGE_VALUES) {
9792           for (const SDValue &V : Val->op_values())
9793             handleRegAssign(V);
9794         } else
9795           handleRegAssign(Val);
9796       }
9797     }
9798   }
9799 
9800   // Set results.
9801   if (!ResultValues.empty()) {
9802     assert(CurResultType == ResultTypes.end() &&
9803            "Mismatch in number of ResultTypes");
9804     assert(ResultValues.size() == ResultTypes.size() &&
9805            "Mismatch in number of output operands in asm result");
9806 
9807     SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
9808                             DAG.getVTList(ResultVTs), ResultValues);
9809     setValue(&Call, V);
9810   }
9811 
9812   // Collect store chains.
9813   if (!OutChains.empty())
9814     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
9815 
9816   if (EmitEHLabels) {
9817     Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
9818   }
9819 
9820   // Only Update Root if inline assembly has a memory effect.
9821   if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
9822       EmitEHLabels)
9823     DAG.setRoot(Chain);
9824 }
9825 
9826 void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
9827                                              const Twine &Message) {
9828   LLVMContext &Ctx = *DAG.getContext();
9829   Ctx.emitError(&Call, Message);
9830 
9831   // Make sure we leave the DAG in a valid state
9832   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9833   SmallVector<EVT, 1> ValueVTs;
9834   ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
9835 
9836   if (ValueVTs.empty())
9837     return;
9838 
9839   SmallVector<SDValue, 1> Ops;
9840   for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
9841     Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
9842 
9843   setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
9844 }
9845 
9846 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
9847   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
9848                           MVT::Other, getRoot(),
9849                           getValue(I.getArgOperand(0)),
9850                           DAG.getSrcValue(I.getArgOperand(0))));
9851 }
9852 
9853 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
9854   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9855   const DataLayout &DL = DAG.getDataLayout();
9856   SDValue V = DAG.getVAArg(
9857       TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
9858       getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
9859       DL.getABITypeAlign(I.getType()).value());
9860   DAG.setRoot(V.getValue(1));
9861 
9862   if (I.getType()->isPointerTy())
9863     V = DAG.getPtrExtOrTrunc(
9864         V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
9865   setValue(&I, V);
9866 }
9867 
9868 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
9869   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
9870                           MVT::Other, getRoot(),
9871                           getValue(I.getArgOperand(0)),
9872                           DAG.getSrcValue(I.getArgOperand(0))));
9873 }
9874 
9875 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
9876   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
9877                           MVT::Other, getRoot(),
9878                           getValue(I.getArgOperand(0)),
9879                           getValue(I.getArgOperand(1)),
9880                           DAG.getSrcValue(I.getArgOperand(0)),
9881                           DAG.getSrcValue(I.getArgOperand(1))));
9882 }
9883 
9884 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
9885                                                     const Instruction &I,
9886                                                     SDValue Op) {
9887   const MDNode *Range = getRangeMetadata(I);
9888   if (!Range)
9889     return Op;
9890 
9891   ConstantRange CR = getConstantRangeFromMetadata(*Range);
9892   if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
9893     return Op;
9894 
9895   APInt Lo = CR.getUnsignedMin();
9896   if (!Lo.isMinValue())
9897     return Op;
9898 
9899   APInt Hi = CR.getUnsignedMax();
9900   unsigned Bits = std::max(Hi.getActiveBits(),
9901                            static_cast<unsigned>(IntegerType::MIN_INT_BITS));
9902 
9903   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
9904 
9905   SDLoc SL = getCurSDLoc();
9906 
9907   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
9908                              DAG.getValueType(SmallVT));
9909   unsigned NumVals = Op.getNode()->getNumValues();
9910   if (NumVals == 1)
9911     return ZExt;
9912 
9913   SmallVector<SDValue, 4> Ops;
9914 
9915   Ops.push_back(ZExt);
9916   for (unsigned I = 1; I != NumVals; ++I)
9917     Ops.push_back(Op.getValue(I));
9918 
9919   return DAG.getMergeValues(Ops, SL);
9920 }
9921 
9922 /// Populate a CallLowerinInfo (into \p CLI) based on the properties of
9923 /// the call being lowered.
9924 ///
9925 /// This is a helper for lowering intrinsics that follow a target calling
9926 /// convention or require stack pointer adjustment. Only a subset of the
9927 /// intrinsic's operands need to participate in the calling convention.
9928 void SelectionDAGBuilder::populateCallLoweringInfo(
9929     TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
9930     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
9931     AttributeSet RetAttrs, bool IsPatchPoint) {
9932   TargetLowering::ArgListTy Args;
9933   Args.reserve(NumArgs);
9934 
9935   // Populate the argument list.
9936   // Attributes for args start at offset 1, after the return attribute.
9937   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
9938        ArgI != ArgE; ++ArgI) {
9939     const Value *V = Call->getOperand(ArgI);
9940 
9941     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
9942 
9943     TargetLowering::ArgListEntry Entry;
9944     Entry.Node = getValue(V);
9945     Entry.Ty = V->getType();
9946     Entry.setAttributes(Call, ArgI);
9947     Args.push_back(Entry);
9948   }
9949 
9950   CLI.setDebugLoc(getCurSDLoc())
9951       .setChain(getRoot())
9952       .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
9953                  RetAttrs)
9954       .setDiscardResult(Call->use_empty())
9955       .setIsPatchPoint(IsPatchPoint)
9956       .setIsPreallocated(
9957           Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
9958 }
9959 
9960 /// Add a stack map intrinsic call's live variable operands to a stackmap
9961 /// or patchpoint target node's operand list.
9962 ///
9963 /// Constants are converted to TargetConstants purely as an optimization to
9964 /// avoid constant materialization and register allocation.
9965 ///
9966 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
9967 /// generate addess computation nodes, and so FinalizeISel can convert the
9968 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
9969 /// address materialization and register allocation, but may also be required
9970 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
9971 /// alloca in the entry block, then the runtime may assume that the alloca's
9972 /// StackMap location can be read immediately after compilation and that the
9973 /// location is valid at any point during execution (this is similar to the
9974 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
9975 /// only available in a register, then the runtime would need to trap when
9976 /// execution reaches the StackMap in order to read the alloca's location.
9977 static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
9978                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
9979                                 SelectionDAGBuilder &Builder) {
9980   SelectionDAG &DAG = Builder.DAG;
9981   for (unsigned I = StartIdx; I < Call.arg_size(); I++) {
9982     SDValue Op = Builder.getValue(Call.getArgOperand(I));
9983 
9984     // Things on the stack are pointer-typed, meaning that they are already
9985     // legal and can be emitted directly to target nodes.
9986     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
9987       Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
9988     } else {
9989       // Otherwise emit a target independent node to be legalised.
9990       Ops.push_back(Builder.getValue(Call.getArgOperand(I)));
9991     }
9992   }
9993 }
9994 
9995 /// Lower llvm.experimental.stackmap.
9996 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
9997   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
9998   //                                  [live variables...])
9999 
10000   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
10001 
10002   SDValue Chain, InGlue, Callee;
10003   SmallVector<SDValue, 32> Ops;
10004 
10005   SDLoc DL = getCurSDLoc();
10006   Callee = getValue(CI.getCalledOperand());
10007 
10008   // The stackmap intrinsic only records the live variables (the arguments
10009   // passed to it) and emits NOPS (if requested). Unlike the patchpoint
10010   // intrinsic, this won't be lowered to a function call. This means we don't
10011   // have to worry about calling conventions and target specific lowering code.
10012   // Instead we perform the call lowering right here.
10013   //
10014   // chain, flag = CALLSEQ_START(chain, 0, 0)
10015   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
10016   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
10017   //
10018   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
10019   InGlue = Chain.getValue(1);
10020 
10021   // Add the STACKMAP operands, starting with DAG house-keeping.
10022   Ops.push_back(Chain);
10023   Ops.push_back(InGlue);
10024 
10025   // Add the <id>, <numShadowBytes> operands.
10026   //
10027   // These do not require legalisation, and can be emitted directly to target
10028   // constant nodes.
10029   SDValue ID = getValue(CI.getArgOperand(0));
10030   assert(ID.getValueType() == MVT::i64);
10031   SDValue IDConst =
10032       DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType());
10033   Ops.push_back(IDConst);
10034 
10035   SDValue Shad = getValue(CI.getArgOperand(1));
10036   assert(Shad.getValueType() == MVT::i32);
10037   SDValue ShadConst =
10038       DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType());
10039   Ops.push_back(ShadConst);
10040 
10041   // Add the live variables.
10042   addStackMapLiveVars(CI, 2, DL, Ops, *this);
10043 
10044   // Create the STACKMAP node.
10045   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10046   Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
10047   InGlue = Chain.getValue(1);
10048 
10049   Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
10050 
10051   // Stackmaps don't generate values, so nothing goes into the NodeMap.
10052 
10053   // Set the root to the target-lowered call chain.
10054   DAG.setRoot(Chain);
10055 
10056   // Inform the Frame Information that we have a stackmap in this function.
10057   FuncInfo.MF->getFrameInfo().setHasStackMap();
10058 }
10059 
10060 /// Lower llvm.experimental.patchpoint directly to its target opcode.
10061 void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
10062                                           const BasicBlock *EHPadBB) {
10063   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
10064   //                                                 i32 <numBytes>,
10065   //                                                 i8* <target>,
10066   //                                                 i32 <numArgs>,
10067   //                                                 [Args...],
10068   //                                                 [live variables...])
10069 
10070   CallingConv::ID CC = CB.getCallingConv();
10071   bool IsAnyRegCC = CC == CallingConv::AnyReg;
10072   bool HasDef = !CB.getType()->isVoidTy();
10073   SDLoc dl = getCurSDLoc();
10074   SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
10075 
10076   // Handle immediate and symbolic callees.
10077   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10078     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10079                                    /*isTarget=*/true);
10080   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10081     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10082                                          SDLoc(SymbolicCallee),
10083                                          SymbolicCallee->getValueType(0));
10084 
10085   // Get the real number of arguments participating in the call <numArgs>
10086   SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
10087   unsigned NumArgs = NArgVal->getAsZExtVal();
10088 
10089   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
10090   // Intrinsics include all meta-operands up to but not including CC.
10091   unsigned NumMetaOpers = PatchPointOpers::CCPos;
10092   assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
10093          "Not enough arguments provided to the patchpoint intrinsic");
10094 
10095   // For AnyRegCC the arguments are lowered later on manually.
10096   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10097   Type *ReturnTy =
10098       IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
10099 
10100   TargetLowering::CallLoweringInfo CLI(DAG);
10101   populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
10102                            ReturnTy, CB.getAttributes().getRetAttrs(), true);
10103   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
10104 
10105   SDNode *CallEnd = Result.second.getNode();
10106   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
10107     CallEnd = CallEnd->getOperand(0).getNode();
10108 
10109   /// Get a call instruction from the call sequence chain.
10110   /// Tail calls are not allowed.
10111   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
10112          "Expected a callseq node.");
10113   SDNode *Call = CallEnd->getOperand(0).getNode();
10114   bool HasGlue = Call->getGluedNode();
10115 
10116   // Replace the target specific call node with the patchable intrinsic.
10117   SmallVector<SDValue, 8> Ops;
10118 
10119   // Push the chain.
10120   Ops.push_back(*(Call->op_begin()));
10121 
10122   // Optionally, push the glue (if any).
10123   if (HasGlue)
10124     Ops.push_back(*(Call->op_end() - 1));
10125 
10126   // Push the register mask info.
10127   if (HasGlue)
10128     Ops.push_back(*(Call->op_end() - 2));
10129   else
10130     Ops.push_back(*(Call->op_end() - 1));
10131 
10132   // Add the <id> and <numBytes> constants.
10133   SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
10134   Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64));
10135   SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
10136   Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32));
10137 
10138   // Add the callee.
10139   Ops.push_back(Callee);
10140 
10141   // Adjust <numArgs> to account for any arguments that have been passed on the
10142   // stack instead.
10143   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
10144   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
10145   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10146   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10147 
10148   // Add the calling convention
10149   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
10150 
10151   // Add the arguments we omitted previously. The register allocator should
10152   // place these in any free register.
10153   if (IsAnyRegCC)
10154     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
10155       Ops.push_back(getValue(CB.getArgOperand(i)));
10156 
10157   // Push the arguments from the call instruction.
10158   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
10159   Ops.append(Call->op_begin() + 2, e);
10160 
10161   // Push live variables for the stack map.
10162   addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
10163 
10164   SDVTList NodeTys;
10165   if (IsAnyRegCC && HasDef) {
10166     // Create the return types based on the intrinsic definition
10167     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10168     SmallVector<EVT, 3> ValueVTs;
10169     ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
10170     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
10171 
10172     // There is always a chain and a glue type at the end
10173     ValueVTs.push_back(MVT::Other);
10174     ValueVTs.push_back(MVT::Glue);
10175     NodeTys = DAG.getVTList(ValueVTs);
10176   } else
10177     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10178 
10179   // Replace the target specific call node with a PATCHPOINT node.
10180   SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);
10181 
10182   // Update the NodeMap.
10183   if (HasDef) {
10184     if (IsAnyRegCC)
10185       setValue(&CB, SDValue(PPV.getNode(), 0));
10186     else
10187       setValue(&CB, Result.first);
10188   }
10189 
10190   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
10191   // call sequence. Furthermore the location of the chain and glue can change
10192   // when the AnyReg calling convention is used and the intrinsic returns a
10193   // value.
10194   if (IsAnyRegCC && HasDef) {
10195     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
10196     SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
10197     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10198   } else
10199     DAG.ReplaceAllUsesWith(Call, PPV.getNode());
10200   DAG.DeleteNode(Call);
10201 
10202   // Inform the Frame Information that we have a patchpoint in this function.
10203   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10204 }
10205 
10206 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
10207                                             unsigned Intrinsic) {
10208   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10209   SDValue Op1 = getValue(I.getArgOperand(0));
10210   SDValue Op2;
10211   if (I.arg_size() > 1)
10212     Op2 = getValue(I.getArgOperand(1));
10213   SDLoc dl = getCurSDLoc();
10214   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
10215   SDValue Res;
10216   SDNodeFlags SDFlags;
10217   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
10218     SDFlags.copyFMF(*FPMO);
10219 
10220   switch (Intrinsic) {
10221   case Intrinsic::vector_reduce_fadd:
10222     if (SDFlags.hasAllowReassociation())
10223       Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
10224                         DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10225                         SDFlags);
10226     else
10227       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10228     break;
10229   case Intrinsic::vector_reduce_fmul:
10230     if (SDFlags.hasAllowReassociation())
10231       Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
10232                         DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10233                         SDFlags);
10234     else
10235       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10236     break;
10237   case Intrinsic::vector_reduce_add:
10238     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10239     break;
10240   case Intrinsic::vector_reduce_mul:
10241     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10242     break;
10243   case Intrinsic::vector_reduce_and:
10244     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10245     break;
10246   case Intrinsic::vector_reduce_or:
10247     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10248     break;
10249   case Intrinsic::vector_reduce_xor:
10250     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10251     break;
10252   case Intrinsic::vector_reduce_smax:
10253     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10254     break;
10255   case Intrinsic::vector_reduce_smin:
10256     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10257     break;
10258   case Intrinsic::vector_reduce_umax:
10259     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10260     break;
10261   case Intrinsic::vector_reduce_umin:
10262     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10263     break;
10264   case Intrinsic::vector_reduce_fmax:
10265     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10266     break;
10267   case Intrinsic::vector_reduce_fmin:
10268     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10269     break;
10270   case Intrinsic::vector_reduce_fmaximum:
10271     Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
10272     break;
10273   case Intrinsic::vector_reduce_fminimum:
10274     Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
10275     break;
10276   default:
10277     llvm_unreachable("Unhandled vector reduce intrinsic");
10278   }
10279   setValue(&I, Res);
10280 }
10281 
10282 /// Returns an AttributeList representing the attributes applied to the return
10283 /// value of the given call.
10284 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
10285   SmallVector<Attribute::AttrKind, 2> Attrs;
10286   if (CLI.RetSExt)
10287     Attrs.push_back(Attribute::SExt);
10288   if (CLI.RetZExt)
10289     Attrs.push_back(Attribute::ZExt);
10290   if (CLI.IsInReg)
10291     Attrs.push_back(Attribute::InReg);
10292 
10293   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
10294                             Attrs);
10295 }
10296 
10297 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
10298 /// implementation, which just calls LowerCall.
10299 /// FIXME: When all targets are
10300 /// migrated to using LowerCall, this hook should be integrated into SDISel.
10301 std::pair<SDValue, SDValue>
10302 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
10303   // Handle the incoming return values from the call.
10304   CLI.Ins.clear();
10305   Type *OrigRetTy = CLI.RetTy;
10306   SmallVector<EVT, 4> RetTys;
10307   SmallVector<uint64_t, 4> Offsets;
10308   auto &DL = CLI.DAG.getDataLayout();
10309   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets, 0);
10310 
10311   if (CLI.IsPostTypeLegalization) {
10312     // If we are lowering a libcall after legalization, split the return type.
10313     SmallVector<EVT, 4> OldRetTys;
10314     SmallVector<uint64_t, 4> OldOffsets;
10315     RetTys.swap(OldRetTys);
10316     Offsets.swap(OldOffsets);
10317 
10318     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
10319       EVT RetVT = OldRetTys[i];
10320       uint64_t Offset = OldOffsets[i];
10321       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
10322       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
10323       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
10324       RetTys.append(NumRegs, RegisterVT);
10325       for (unsigned j = 0; j != NumRegs; ++j)
10326         Offsets.push_back(Offset + j * RegisterVTByteSZ);
10327     }
10328   }
10329 
10330   SmallVector<ISD::OutputArg, 4> Outs;
10331   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
10332 
10333   bool CanLowerReturn =
10334       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
10335                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
10336 
10337   SDValue DemoteStackSlot;
10338   int DemoteStackIdx = -100;
10339   if (!CanLowerReturn) {
10340     // FIXME: equivalent assert?
10341     // assert(!CS.hasInAllocaArgument() &&
10342     //        "sret demotion is incompatible with inalloca");
10343     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
10344     Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
10345     MachineFunction &MF = CLI.DAG.getMachineFunction();
10346     DemoteStackIdx =
10347         MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
10348     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
10349                                               DL.getAllocaAddrSpace());
10350 
10351     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
10352     ArgListEntry Entry;
10353     Entry.Node = DemoteStackSlot;
10354     Entry.Ty = StackSlotPtrType;
10355     Entry.IsSExt = false;
10356     Entry.IsZExt = false;
10357     Entry.IsInReg = false;
10358     Entry.IsSRet = true;
10359     Entry.IsNest = false;
10360     Entry.IsByVal = false;
10361     Entry.IsByRef = false;
10362     Entry.IsReturned = false;
10363     Entry.IsSwiftSelf = false;
10364     Entry.IsSwiftAsync = false;
10365     Entry.IsSwiftError = false;
10366     Entry.IsCFGuardTarget = false;
10367     Entry.Alignment = Alignment;
10368     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
10369     CLI.NumFixedArgs += 1;
10370     CLI.getArgs()[0].IndirectType = CLI.RetTy;
10371     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
10372 
10373     // sret demotion isn't compatible with tail-calls, since the sret argument
10374     // points into the callers stack frame.
10375     CLI.IsTailCall = false;
10376   } else {
10377     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10378         CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
10379     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
10380       ISD::ArgFlagsTy Flags;
10381       if (NeedsRegBlock) {
10382         Flags.setInConsecutiveRegs();
10383         if (I == RetTys.size() - 1)
10384           Flags.setInConsecutiveRegsLast();
10385       }
10386       EVT VT = RetTys[I];
10387       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10388                                                      CLI.CallConv, VT);
10389       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10390                                                        CLI.CallConv, VT);
10391       for (unsigned i = 0; i != NumRegs; ++i) {
10392         ISD::InputArg MyFlags;
10393         MyFlags.Flags = Flags;
10394         MyFlags.VT = RegisterVT;
10395         MyFlags.ArgVT = VT;
10396         MyFlags.Used = CLI.IsReturnValueUsed;
10397         if (CLI.RetTy->isPointerTy()) {
10398           MyFlags.Flags.setPointer();
10399           MyFlags.Flags.setPointerAddrSpace(
10400               cast<PointerType>(CLI.RetTy)->getAddressSpace());
10401         }
10402         if (CLI.RetSExt)
10403           MyFlags.Flags.setSExt();
10404         if (CLI.RetZExt)
10405           MyFlags.Flags.setZExt();
10406         if (CLI.IsInReg)
10407           MyFlags.Flags.setInReg();
10408         CLI.Ins.push_back(MyFlags);
10409       }
10410     }
10411   }
10412 
10413   // We push in swifterror return as the last element of CLI.Ins.
10414   ArgListTy &Args = CLI.getArgs();
10415   if (supportSwiftError()) {
10416     for (const ArgListEntry &Arg : Args) {
10417       if (Arg.IsSwiftError) {
10418         ISD::InputArg MyFlags;
10419         MyFlags.VT = getPointerTy(DL);
10420         MyFlags.ArgVT = EVT(getPointerTy(DL));
10421         MyFlags.Flags.setSwiftError();
10422         CLI.Ins.push_back(MyFlags);
10423       }
10424     }
10425   }
10426 
10427   // Handle all of the outgoing arguments.
10428   CLI.Outs.clear();
10429   CLI.OutVals.clear();
10430   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
10431     SmallVector<EVT, 4> ValueVTs;
10432     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
10433     // FIXME: Split arguments if CLI.IsPostTypeLegalization
10434     Type *FinalType = Args[i].Ty;
10435     if (Args[i].IsByVal)
10436       FinalType = Args[i].IndirectType;
10437     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10438         FinalType, CLI.CallConv, CLI.IsVarArg, DL);
10439     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
10440          ++Value) {
10441       EVT VT = ValueVTs[Value];
10442       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
10443       SDValue Op = SDValue(Args[i].Node.getNode(),
10444                            Args[i].Node.getResNo() + Value);
10445       ISD::ArgFlagsTy Flags;
10446 
10447       // Certain targets (such as MIPS), may have a different ABI alignment
10448       // for a type depending on the context. Give the target a chance to
10449       // specify the alignment it wants.
10450       const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
10451       Flags.setOrigAlign(OriginalAlignment);
10452 
10453       if (Args[i].Ty->isPointerTy()) {
10454         Flags.setPointer();
10455         Flags.setPointerAddrSpace(
10456             cast<PointerType>(Args[i].Ty)->getAddressSpace());
10457       }
10458       if (Args[i].IsZExt)
10459         Flags.setZExt();
10460       if (Args[i].IsSExt)
10461         Flags.setSExt();
10462       if (Args[i].IsInReg) {
10463         // If we are using vectorcall calling convention, a structure that is
10464         // passed InReg - is surely an HVA
10465         if (CLI.CallConv == CallingConv::X86_VectorCall &&
10466             isa<StructType>(FinalType)) {
10467           // The first value of a structure is marked
10468           if (0 == Value)
10469             Flags.setHvaStart();
10470           Flags.setHva();
10471         }
10472         // Set InReg Flag
10473         Flags.setInReg();
10474       }
10475       if (Args[i].IsSRet)
10476         Flags.setSRet();
10477       if (Args[i].IsSwiftSelf)
10478         Flags.setSwiftSelf();
10479       if (Args[i].IsSwiftAsync)
10480         Flags.setSwiftAsync();
10481       if (Args[i].IsSwiftError)
10482         Flags.setSwiftError();
10483       if (Args[i].IsCFGuardTarget)
10484         Flags.setCFGuardTarget();
10485       if (Args[i].IsByVal)
10486         Flags.setByVal();
10487       if (Args[i].IsByRef)
10488         Flags.setByRef();
10489       if (Args[i].IsPreallocated) {
10490         Flags.setPreallocated();
10491         // Set the byval flag for CCAssignFn callbacks that don't know about
10492         // preallocated.  This way we can know how many bytes we should've
10493         // allocated and how many bytes a callee cleanup function will pop.  If
10494         // we port preallocated to more targets, we'll have to add custom
10495         // preallocated handling in the various CC lowering callbacks.
10496         Flags.setByVal();
10497       }
10498       if (Args[i].IsInAlloca) {
10499         Flags.setInAlloca();
10500         // Set the byval flag for CCAssignFn callbacks that don't know about
10501         // inalloca.  This way we can know how many bytes we should've allocated
10502         // and how many bytes a callee cleanup function will pop.  If we port
10503         // inalloca to more targets, we'll have to add custom inalloca handling
10504         // in the various CC lowering callbacks.
10505         Flags.setByVal();
10506       }
10507       Align MemAlign;
10508       if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
10509         unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
10510         Flags.setByValSize(FrameSize);
10511 
10512         // info is not there but there are cases it cannot get right.
10513         if (auto MA = Args[i].Alignment)
10514           MemAlign = *MA;
10515         else
10516           MemAlign = Align(getByValTypeAlignment(Args[i].IndirectType, DL));
10517       } else if (auto MA = Args[i].Alignment) {
10518         MemAlign = *MA;
10519       } else {
10520         MemAlign = OriginalAlignment;
10521       }
10522       Flags.setMemAlign(MemAlign);
10523       if (Args[i].IsNest)
10524         Flags.setNest();
10525       if (NeedsRegBlock)
10526         Flags.setInConsecutiveRegs();
10527 
10528       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10529                                                  CLI.CallConv, VT);
10530       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10531                                                         CLI.CallConv, VT);
10532       SmallVector<SDValue, 4> Parts(NumParts);
10533       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
10534 
10535       if (Args[i].IsSExt)
10536         ExtendKind = ISD::SIGN_EXTEND;
10537       else if (Args[i].IsZExt)
10538         ExtendKind = ISD::ZERO_EXTEND;
10539 
10540       // Conservatively only handle 'returned' on non-vectors that can be lowered,
10541       // for now.
10542       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
10543           CanLowerReturn) {
10544         assert((CLI.RetTy == Args[i].Ty ||
10545                 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
10546                  CLI.RetTy->getPointerAddressSpace() ==
10547                      Args[i].Ty->getPointerAddressSpace())) &&
10548                RetTys.size() == NumValues && "unexpected use of 'returned'");
10549         // Before passing 'returned' to the target lowering code, ensure that
10550         // either the register MVT and the actual EVT are the same size or that
10551         // the return value and argument are extended in the same way; in these
10552         // cases it's safe to pass the argument register value unchanged as the
10553         // return register value (although it's at the target's option whether
10554         // to do so)
10555         // TODO: allow code generation to take advantage of partially preserved
10556         // registers rather than clobbering the entire register when the
10557         // parameter extension method is not compatible with the return
10558         // extension method
10559         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
10560             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
10561              CLI.RetZExt == Args[i].IsZExt))
10562           Flags.setReturned();
10563       }
10564 
10565       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
10566                      CLI.CallConv, ExtendKind);
10567 
10568       for (unsigned j = 0; j != NumParts; ++j) {
10569         // if it isn't first piece, alignment must be 1
10570         // For scalable vectors the scalable part is currently handled
10571         // by individual targets, so we just use the known minimum size here.
10572         ISD::OutputArg MyFlags(
10573             Flags, Parts[j].getValueType().getSimpleVT(), VT,
10574             i < CLI.NumFixedArgs, i,
10575             j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
10576         if (NumParts > 1 && j == 0)
10577           MyFlags.Flags.setSplit();
10578         else if (j != 0) {
10579           MyFlags.Flags.setOrigAlign(Align(1));
10580           if (j == NumParts - 1)
10581             MyFlags.Flags.setSplitEnd();
10582         }
10583 
10584         CLI.Outs.push_back(MyFlags);
10585         CLI.OutVals.push_back(Parts[j]);
10586       }
10587 
10588       if (NeedsRegBlock && Value == NumValues - 1)
10589         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
10590     }
10591   }
10592 
10593   SmallVector<SDValue, 4> InVals;
10594   CLI.Chain = LowerCall(CLI, InVals);
10595 
10596   // Update CLI.InVals to use outside of this function.
10597   CLI.InVals = InVals;
10598 
10599   // Verify that the target's LowerCall behaved as expected.
10600   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
10601          "LowerCall didn't return a valid chain!");
10602   assert((!CLI.IsTailCall || InVals.empty()) &&
10603          "LowerCall emitted a return value for a tail call!");
10604   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
10605          "LowerCall didn't emit the correct number of values!");
10606 
10607   // For a tail call, the return value is merely live-out and there aren't
10608   // any nodes in the DAG representing it. Return a special value to
10609   // indicate that a tail call has been emitted and no more Instructions
10610   // should be processed in the current block.
10611   if (CLI.IsTailCall) {
10612     CLI.DAG.setRoot(CLI.Chain);
10613     return std::make_pair(SDValue(), SDValue());
10614   }
10615 
10616 #ifndef NDEBUG
10617   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
10618     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
10619     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
10620            "LowerCall emitted a value with the wrong type!");
10621   }
10622 #endif
10623 
10624   SmallVector<SDValue, 4> ReturnValues;
10625   if (!CanLowerReturn) {
10626     // The instruction result is the result of loading from the
10627     // hidden sret parameter.
10628     SmallVector<EVT, 1> PVTs;
10629     Type *PtrRetTy =
10630         PointerType::get(OrigRetTy->getContext(), DL.getAllocaAddrSpace());
10631 
10632     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
10633     assert(PVTs.size() == 1 && "Pointers should fit in one register");
10634     EVT PtrVT = PVTs[0];
10635 
10636     unsigned NumValues = RetTys.size();
10637     ReturnValues.resize(NumValues);
10638     SmallVector<SDValue, 4> Chains(NumValues);
10639 
10640     // An aggregate return value cannot wrap around the address space, so
10641     // offsets to its parts don't wrap either.
10642     SDNodeFlags Flags;
10643     Flags.setNoUnsignedWrap(true);
10644 
10645     MachineFunction &MF = CLI.DAG.getMachineFunction();
10646     Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
10647     for (unsigned i = 0; i < NumValues; ++i) {
10648       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
10649                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
10650                                                         PtrVT), Flags);
10651       SDValue L = CLI.DAG.getLoad(
10652           RetTys[i], CLI.DL, CLI.Chain, Add,
10653           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
10654                                             DemoteStackIdx, Offsets[i]),
10655           HiddenSRetAlign);
10656       ReturnValues[i] = L;
10657       Chains[i] = L.getValue(1);
10658     }
10659 
10660     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
10661   } else {
10662     // Collect the legal value parts into potentially illegal values
10663     // that correspond to the original function's return values.
10664     std::optional<ISD::NodeType> AssertOp;
10665     if (CLI.RetSExt)
10666       AssertOp = ISD::AssertSext;
10667     else if (CLI.RetZExt)
10668       AssertOp = ISD::AssertZext;
10669     unsigned CurReg = 0;
10670     for (EVT VT : RetTys) {
10671       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10672                                                      CLI.CallConv, VT);
10673       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10674                                                        CLI.CallConv, VT);
10675 
10676       ReturnValues.push_back(getCopyFromParts(
10677           CLI.DAG, CLI.DL, &InVals[CurReg], NumRegs, RegisterVT, VT, nullptr,
10678           CLI.Chain, CLI.CallConv, AssertOp));
10679       CurReg += NumRegs;
10680     }
10681 
10682     // For a function returning void, there is no return value. We can't create
10683     // such a node, so we just return a null return value in that case. In
10684     // that case, nothing will actually look at the value.
10685     if (ReturnValues.empty())
10686       return std::make_pair(SDValue(), CLI.Chain);
10687   }
10688 
10689   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
10690                                 CLI.DAG.getVTList(RetTys), ReturnValues);
10691   return std::make_pair(Res, CLI.Chain);
10692 }
10693 
10694 /// Places new result values for the node in Results (their number
10695 /// and types must exactly match those of the original return values of
10696 /// the node), or leaves Results empty, which indicates that the node is not
10697 /// to be custom lowered after all.
10698 void TargetLowering::LowerOperationWrapper(SDNode *N,
10699                                            SmallVectorImpl<SDValue> &Results,
10700                                            SelectionDAG &DAG) const {
10701   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
10702 
10703   if (!Res.getNode())
10704     return;
10705 
10706   // If the original node has one result, take the return value from
10707   // LowerOperation as is. It might not be result number 0.
10708   if (N->getNumValues() == 1) {
10709     Results.push_back(Res);
10710     return;
10711   }
10712 
10713   // If the original node has multiple results, then the return node should
10714   // have the same number of results.
10715   assert((N->getNumValues() == Res->getNumValues()) &&
10716       "Lowering returned the wrong number of results!");
10717 
10718   // Places new result values base on N result number.
10719   for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
10720     Results.push_back(Res.getValue(I));
10721 }
10722 
10723 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10724   llvm_unreachable("LowerOperation not implemented for this target!");
10725 }
10726 
10727 void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V,
10728                                                      unsigned Reg,
10729                                                      ISD::NodeType ExtendType) {
10730   SDValue Op = getNonRegisterValue(V);
10731   assert((Op.getOpcode() != ISD::CopyFromReg ||
10732           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
10733          "Copy from a reg to the same reg!");
10734   assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
10735 
10736   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10737   // If this is an InlineAsm we have to match the registers required, not the
10738   // notional registers required by the type.
10739 
10740   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
10741                    std::nullopt); // This is not an ABI copy.
10742   SDValue Chain = DAG.getEntryNode();
10743 
10744   if (ExtendType == ISD::ANY_EXTEND) {
10745     auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
10746     if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
10747       ExtendType = PreferredExtendIt->second;
10748   }
10749   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
10750   PendingExports.push_back(Chain);
10751 }
10752 
10753 #include "llvm/CodeGen/SelectionDAGISel.h"
10754 
10755 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
10756 /// entry block, return true.  This includes arguments used by switches, since
10757 /// the switch may expand into multiple basic blocks.
10758 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
10759   // With FastISel active, we may be splitting blocks, so force creation
10760   // of virtual registers for all non-dead arguments.
10761   if (FastISel)
10762     return A->use_empty();
10763 
10764   const BasicBlock &Entry = A->getParent()->front();
10765   for (const User *U : A->users())
10766     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
10767       return false;  // Use not in entry block.
10768 
10769   return true;
10770 }
10771 
10772 using ArgCopyElisionMapTy =
10773     DenseMap<const Argument *,
10774              std::pair<const AllocaInst *, const StoreInst *>>;
10775 
10776 /// Scan the entry block of the function in FuncInfo for arguments that look
10777 /// like copies into a local alloca. Record any copied arguments in
10778 /// ArgCopyElisionCandidates.
10779 static void
10780 findArgumentCopyElisionCandidates(const DataLayout &DL,
10781                                   FunctionLoweringInfo *FuncInfo,
10782                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
10783   // Record the state of every static alloca used in the entry block. Argument
10784   // allocas are all used in the entry block, so we need approximately as many
10785   // entries as we have arguments.
10786   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
10787   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
10788   unsigned NumArgs = FuncInfo->Fn->arg_size();
10789   StaticAllocas.reserve(NumArgs * 2);
10790 
10791   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
10792     if (!V)
10793       return nullptr;
10794     V = V->stripPointerCasts();
10795     const auto *AI = dyn_cast<AllocaInst>(V);
10796     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
10797       return nullptr;
10798     auto Iter = StaticAllocas.insert({AI, Unknown});
10799     return &Iter.first->second;
10800   };
10801 
10802   // Look for stores of arguments to static allocas. Look through bitcasts and
10803   // GEPs to handle type coercions, as long as the alloca is fully initialized
10804   // by the store. Any non-store use of an alloca escapes it and any subsequent
10805   // unanalyzed store might write it.
10806   // FIXME: Handle structs initialized with multiple stores.
10807   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
10808     // Look for stores, and handle non-store uses conservatively.
10809     const auto *SI = dyn_cast<StoreInst>(&I);
10810     if (!SI) {
10811       // We will look through cast uses, so ignore them completely.
10812       if (I.isCast())
10813         continue;
10814       // Ignore debug info and pseudo op intrinsics, they don't escape or store
10815       // to allocas.
10816       if (I.isDebugOrPseudoInst())
10817         continue;
10818       // This is an unknown instruction. Assume it escapes or writes to all
10819       // static alloca operands.
10820       for (const Use &U : I.operands()) {
10821         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
10822           *Info = StaticAllocaInfo::Clobbered;
10823       }
10824       continue;
10825     }
10826 
10827     // If the stored value is a static alloca, mark it as escaped.
10828     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
10829       *Info = StaticAllocaInfo::Clobbered;
10830 
10831     // Check if the destination is a static alloca.
10832     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
10833     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
10834     if (!Info)
10835       continue;
10836     const AllocaInst *AI = cast<AllocaInst>(Dst);
10837 
10838     // Skip allocas that have been initialized or clobbered.
10839     if (*Info != StaticAllocaInfo::Unknown)
10840       continue;
10841 
10842     // Check if the stored value is an argument, and that this store fully
10843     // initializes the alloca.
10844     // If the argument type has padding bits we can't directly forward a pointer
10845     // as the upper bits may contain garbage.
10846     // Don't elide copies from the same argument twice.
10847     const Value *Val = SI->getValueOperand()->stripPointerCasts();
10848     const auto *Arg = dyn_cast<Argument>(Val);
10849     if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
10850         Arg->getType()->isEmptyTy() ||
10851         DL.getTypeStoreSize(Arg->getType()) !=
10852             DL.getTypeAllocSize(AI->getAllocatedType()) ||
10853         !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
10854         ArgCopyElisionCandidates.count(Arg)) {
10855       *Info = StaticAllocaInfo::Clobbered;
10856       continue;
10857     }
10858 
10859     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
10860                       << '\n');
10861 
10862     // Mark this alloca and store for argument copy elision.
10863     *Info = StaticAllocaInfo::Elidable;
10864     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
10865 
10866     // Stop scanning if we've seen all arguments. This will happen early in -O0
10867     // builds, which is useful, because -O0 builds have large entry blocks and
10868     // many allocas.
10869     if (ArgCopyElisionCandidates.size() == NumArgs)
10870       break;
10871   }
10872 }
10873 
10874 /// Try to elide argument copies from memory into a local alloca. Succeeds if
10875 /// ArgVal is a load from a suitable fixed stack object.
10876 static void tryToElideArgumentCopy(
10877     FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
10878     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
10879     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
10880     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
10881     ArrayRef<SDValue> ArgVals, bool &ArgHasUses) {
10882   // Check if this is a load from a fixed stack object.
10883   auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
10884   if (!LNode)
10885     return;
10886   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
10887   if (!FINode)
10888     return;
10889 
10890   // Check that the fixed stack object is the right size and alignment.
10891   // Look at the alignment that the user wrote on the alloca instead of looking
10892   // at the stack object.
10893   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
10894   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
10895   const AllocaInst *AI = ArgCopyIter->second.first;
10896   int FixedIndex = FINode->getIndex();
10897   int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
10898   int OldIndex = AllocaIndex;
10899   MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
10900   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
10901     LLVM_DEBUG(
10902         dbgs() << "  argument copy elision failed due to bad fixed stack "
10903                   "object size\n");
10904     return;
10905   }
10906   Align RequiredAlignment = AI->getAlign();
10907   if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
10908     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
10909                          "greater than stack argument alignment ("
10910                       << DebugStr(RequiredAlignment) << " vs "
10911                       << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
10912     return;
10913   }
10914 
10915   // Perform the elision. Delete the old stack object and replace its only use
10916   // in the variable info map. Mark the stack object as mutable.
10917   LLVM_DEBUG({
10918     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
10919            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
10920            << '\n';
10921   });
10922   MFI.RemoveStackObject(OldIndex);
10923   MFI.setIsImmutableObjectIndex(FixedIndex, false);
10924   AllocaIndex = FixedIndex;
10925   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
10926   for (SDValue ArgVal : ArgVals)
10927     Chains.push_back(ArgVal.getValue(1));
10928 
10929   // Avoid emitting code for the store implementing the copy.
10930   const StoreInst *SI = ArgCopyIter->second.second;
10931   ElidedArgCopyInstrs.insert(SI);
10932 
10933   // Check for uses of the argument again so that we can avoid exporting ArgVal
10934   // if it is't used by anything other than the store.
10935   for (const Value *U : Arg.users()) {
10936     if (U != SI) {
10937       ArgHasUses = true;
10938       break;
10939     }
10940   }
10941 }
10942 
10943 void SelectionDAGISel::LowerArguments(const Function &F) {
10944   SelectionDAG &DAG = SDB->DAG;
10945   SDLoc dl = SDB->getCurSDLoc();
10946   const DataLayout &DL = DAG.getDataLayout();
10947   SmallVector<ISD::InputArg, 16> Ins;
10948 
10949   // In Naked functions we aren't going to save any registers.
10950   if (F.hasFnAttribute(Attribute::Naked))
10951     return;
10952 
10953   if (!FuncInfo->CanLowerReturn) {
10954     // Put in an sret pointer parameter before all the other parameters.
10955     SmallVector<EVT, 1> ValueVTs;
10956     ComputeValueVTs(*TLI, DAG.getDataLayout(),
10957                     PointerType::get(F.getContext(),
10958                                      DAG.getDataLayout().getAllocaAddrSpace()),
10959                     ValueVTs);
10960 
10961     // NOTE: Assuming that a pointer will never break down to more than one VT
10962     // or one register.
10963     ISD::ArgFlagsTy Flags;
10964     Flags.setSRet();
10965     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
10966     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
10967                          ISD::InputArg::NoArgIndex, 0);
10968     Ins.push_back(RetArg);
10969   }
10970 
10971   // Look for stores of arguments to static allocas. Mark such arguments with a
10972   // flag to ask the target to give us the memory location of that argument if
10973   // available.
10974   ArgCopyElisionMapTy ArgCopyElisionCandidates;
10975   findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
10976                                     ArgCopyElisionCandidates);
10977 
10978   // Set up the incoming argument description vector.
10979   for (const Argument &Arg : F.args()) {
10980     unsigned ArgNo = Arg.getArgNo();
10981     SmallVector<EVT, 4> ValueVTs;
10982     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
10983     bool isArgValueUsed = !Arg.use_empty();
10984     unsigned PartBase = 0;
10985     Type *FinalType = Arg.getType();
10986     if (Arg.hasAttribute(Attribute::ByVal))
10987       FinalType = Arg.getParamByValType();
10988     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
10989         FinalType, F.getCallingConv(), F.isVarArg(), DL);
10990     for (unsigned Value = 0, NumValues = ValueVTs.size();
10991          Value != NumValues; ++Value) {
10992       EVT VT = ValueVTs[Value];
10993       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
10994       ISD::ArgFlagsTy Flags;
10995 
10996 
10997       if (Arg.getType()->isPointerTy()) {
10998         Flags.setPointer();
10999         Flags.setPointerAddrSpace(
11000             cast<PointerType>(Arg.getType())->getAddressSpace());
11001       }
11002       if (Arg.hasAttribute(Attribute::ZExt))
11003         Flags.setZExt();
11004       if (Arg.hasAttribute(Attribute::SExt))
11005         Flags.setSExt();
11006       if (Arg.hasAttribute(Attribute::InReg)) {
11007         // If we are using vectorcall calling convention, a structure that is
11008         // passed InReg - is surely an HVA
11009         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
11010             isa<StructType>(Arg.getType())) {
11011           // The first value of a structure is marked
11012           if (0 == Value)
11013             Flags.setHvaStart();
11014           Flags.setHva();
11015         }
11016         // Set InReg Flag
11017         Flags.setInReg();
11018       }
11019       if (Arg.hasAttribute(Attribute::StructRet))
11020         Flags.setSRet();
11021       if (Arg.hasAttribute(Attribute::SwiftSelf))
11022         Flags.setSwiftSelf();
11023       if (Arg.hasAttribute(Attribute::SwiftAsync))
11024         Flags.setSwiftAsync();
11025       if (Arg.hasAttribute(Attribute::SwiftError))
11026         Flags.setSwiftError();
11027       if (Arg.hasAttribute(Attribute::ByVal))
11028         Flags.setByVal();
11029       if (Arg.hasAttribute(Attribute::ByRef))
11030         Flags.setByRef();
11031       if (Arg.hasAttribute(Attribute::InAlloca)) {
11032         Flags.setInAlloca();
11033         // Set the byval flag for CCAssignFn callbacks that don't know about
11034         // inalloca.  This way we can know how many bytes we should've allocated
11035         // and how many bytes a callee cleanup function will pop.  If we port
11036         // inalloca to more targets, we'll have to add custom inalloca handling
11037         // in the various CC lowering callbacks.
11038         Flags.setByVal();
11039       }
11040       if (Arg.hasAttribute(Attribute::Preallocated)) {
11041         Flags.setPreallocated();
11042         // Set the byval flag for CCAssignFn callbacks that don't know about
11043         // preallocated.  This way we can know how many bytes we should've
11044         // allocated and how many bytes a callee cleanup function will pop.  If
11045         // we port preallocated to more targets, we'll have to add custom
11046         // preallocated handling in the various CC lowering callbacks.
11047         Flags.setByVal();
11048       }
11049 
11050       // Certain targets (such as MIPS), may have a different ABI alignment
11051       // for a type depending on the context. Give the target a chance to
11052       // specify the alignment it wants.
11053       const Align OriginalAlignment(
11054           TLI->getABIAlignmentForCallingConv(ArgTy, DL));
11055       Flags.setOrigAlign(OriginalAlignment);
11056 
11057       Align MemAlign;
11058       Type *ArgMemTy = nullptr;
11059       if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
11060           Flags.isByRef()) {
11061         if (!ArgMemTy)
11062           ArgMemTy = Arg.getPointeeInMemoryValueType();
11063 
11064         uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
11065 
11066         // For in-memory arguments, size and alignment should be passed from FE.
11067         // BE will guess if this info is not there but there are cases it cannot
11068         // get right.
11069         if (auto ParamAlign = Arg.getParamStackAlign())
11070           MemAlign = *ParamAlign;
11071         else if ((ParamAlign = Arg.getParamAlign()))
11072           MemAlign = *ParamAlign;
11073         else
11074           MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
11075         if (Flags.isByRef())
11076           Flags.setByRefSize(MemSize);
11077         else
11078           Flags.setByValSize(MemSize);
11079       } else if (auto ParamAlign = Arg.getParamStackAlign()) {
11080         MemAlign = *ParamAlign;
11081       } else {
11082         MemAlign = OriginalAlignment;
11083       }
11084       Flags.setMemAlign(MemAlign);
11085 
11086       if (Arg.hasAttribute(Attribute::Nest))
11087         Flags.setNest();
11088       if (NeedsRegBlock)
11089         Flags.setInConsecutiveRegs();
11090       if (ArgCopyElisionCandidates.count(&Arg))
11091         Flags.setCopyElisionCandidate();
11092       if (Arg.hasAttribute(Attribute::Returned))
11093         Flags.setReturned();
11094 
11095       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
11096           *CurDAG->getContext(), F.getCallingConv(), VT);
11097       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
11098           *CurDAG->getContext(), F.getCallingConv(), VT);
11099       for (unsigned i = 0; i != NumRegs; ++i) {
11100         // For scalable vectors, use the minimum size; individual targets
11101         // are responsible for handling scalable vector arguments and
11102         // return values.
11103         ISD::InputArg MyFlags(
11104             Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11105             PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
11106         if (NumRegs > 1 && i == 0)
11107           MyFlags.Flags.setSplit();
11108         // if it isn't first piece, alignment must be 1
11109         else if (i > 0) {
11110           MyFlags.Flags.setOrigAlign(Align(1));
11111           if (i == NumRegs - 1)
11112             MyFlags.Flags.setSplitEnd();
11113         }
11114         Ins.push_back(MyFlags);
11115       }
11116       if (NeedsRegBlock && Value == NumValues - 1)
11117         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11118       PartBase += VT.getStoreSize().getKnownMinValue();
11119     }
11120   }
11121 
11122   // Call the target to set up the argument values.
11123   SmallVector<SDValue, 8> InVals;
11124   SDValue NewRoot = TLI->LowerFormalArguments(
11125       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
11126 
11127   // Verify that the target's LowerFormalArguments behaved as expected.
11128   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
11129          "LowerFormalArguments didn't return a valid chain!");
11130   assert(InVals.size() == Ins.size() &&
11131          "LowerFormalArguments didn't emit the correct number of values!");
11132   LLVM_DEBUG({
11133     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
11134       assert(InVals[i].getNode() &&
11135              "LowerFormalArguments emitted a null value!");
11136       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11137              "LowerFormalArguments emitted a value with the wrong type!");
11138     }
11139   });
11140 
11141   // Update the DAG with the new chain value resulting from argument lowering.
11142   DAG.setRoot(NewRoot);
11143 
11144   // Set up the argument values.
11145   unsigned i = 0;
11146   if (!FuncInfo->CanLowerReturn) {
11147     // Create a virtual register for the sret pointer, and put in a copy
11148     // from the sret argument into it.
11149     SmallVector<EVT, 1> ValueVTs;
11150     ComputeValueVTs(*TLI, DAG.getDataLayout(),
11151                     PointerType::get(F.getContext(),
11152                                      DAG.getDataLayout().getAllocaAddrSpace()),
11153                     ValueVTs);
11154     MVT VT = ValueVTs[0].getSimpleVT();
11155     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
11156     std::optional<ISD::NodeType> AssertOp;
11157     SDValue ArgValue =
11158         getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT, nullptr, NewRoot,
11159                          F.getCallingConv(), AssertOp);
11160 
11161     MachineFunction& MF = SDB->DAG.getMachineFunction();
11162     MachineRegisterInfo& RegInfo = MF.getRegInfo();
11163     Register SRetReg =
11164         RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
11165     FuncInfo->DemoteRegister = SRetReg;
11166     NewRoot =
11167         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
11168     DAG.setRoot(NewRoot);
11169 
11170     // i indexes lowered arguments.  Bump it past the hidden sret argument.
11171     ++i;
11172   }
11173 
11174   SmallVector<SDValue, 4> Chains;
11175   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11176   for (const Argument &Arg : F.args()) {
11177     SmallVector<SDValue, 4> ArgValues;
11178     SmallVector<EVT, 4> ValueVTs;
11179     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11180     unsigned NumValues = ValueVTs.size();
11181     if (NumValues == 0)
11182       continue;
11183 
11184     bool ArgHasUses = !Arg.use_empty();
11185 
11186     // Elide the copying store if the target loaded this argument from a
11187     // suitable fixed stack object.
11188     if (Ins[i].Flags.isCopyElisionCandidate()) {
11189       unsigned NumParts = 0;
11190       for (EVT VT : ValueVTs)
11191         NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
11192                                                        F.getCallingConv(), VT);
11193 
11194       tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
11195                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
11196                              ArrayRef(&InVals[i], NumParts), ArgHasUses);
11197     }
11198 
11199     // If this argument is unused then remember its value. It is used to generate
11200     // debugging information.
11201     bool isSwiftErrorArg =
11202         TLI->supportSwiftError() &&
11203         Arg.hasAttribute(Attribute::SwiftError);
11204     if (!ArgHasUses && !isSwiftErrorArg) {
11205       SDB->setUnusedArgValue(&Arg, InVals[i]);
11206 
11207       // Also remember any frame index for use in FastISel.
11208       if (FrameIndexSDNode *FI =
11209           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11210         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11211     }
11212 
11213     for (unsigned Val = 0; Val != NumValues; ++Val) {
11214       EVT VT = ValueVTs[Val];
11215       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
11216                                                       F.getCallingConv(), VT);
11217       unsigned NumParts = TLI->getNumRegistersForCallingConv(
11218           *CurDAG->getContext(), F.getCallingConv(), VT);
11219 
11220       // Even an apparent 'unused' swifterror argument needs to be returned. So
11221       // we do generate a copy for it that can be used on return from the
11222       // function.
11223       if (ArgHasUses || isSwiftErrorArg) {
11224         std::optional<ISD::NodeType> AssertOp;
11225         if (Arg.hasAttribute(Attribute::SExt))
11226           AssertOp = ISD::AssertSext;
11227         else if (Arg.hasAttribute(Attribute::ZExt))
11228           AssertOp = ISD::AssertZext;
11229 
11230         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
11231                                              PartVT, VT, nullptr, NewRoot,
11232                                              F.getCallingConv(), AssertOp));
11233       }
11234 
11235       i += NumParts;
11236     }
11237 
11238     // We don't need to do anything else for unused arguments.
11239     if (ArgValues.empty())
11240       continue;
11241 
11242     // Note down frame index.
11243     if (FrameIndexSDNode *FI =
11244         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11245       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11246 
11247     SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
11248                                      SDB->getCurSDLoc());
11249 
11250     SDB->setValue(&Arg, Res);
11251     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
11252       // We want to associate the argument with the frame index, among
11253       // involved operands, that correspond to the lowest address. The
11254       // getCopyFromParts function, called earlier, is swapping the order of
11255       // the operands to BUILD_PAIR depending on endianness. The result of
11256       // that swapping is that the least significant bits of the argument will
11257       // be in the first operand of the BUILD_PAIR node, and the most
11258       // significant bits will be in the second operand.
11259       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
11260       if (LoadSDNode *LNode =
11261           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
11262         if (FrameIndexSDNode *FI =
11263             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11264           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11265     }
11266 
11267     // Analyses past this point are naive and don't expect an assertion.
11268     if (Res.getOpcode() == ISD::AssertZext)
11269       Res = Res.getOperand(0);
11270 
11271     // Update the SwiftErrorVRegDefMap.
11272     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
11273       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11274       if (Register::isVirtualRegister(Reg))
11275         SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
11276                                    Reg);
11277     }
11278 
11279     // If this argument is live outside of the entry block, insert a copy from
11280     // wherever we got it to the vreg that other BB's will reference it as.
11281     if (Res.getOpcode() == ISD::CopyFromReg) {
11282       // If we can, though, try to skip creating an unnecessary vreg.
11283       // FIXME: This isn't very clean... it would be nice to make this more
11284       // general.
11285       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11286       if (Register::isVirtualRegister(Reg)) {
11287         FuncInfo->ValueMap[&Arg] = Reg;
11288         continue;
11289       }
11290     }
11291     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
11292       FuncInfo->InitializeRegForValue(&Arg);
11293       SDB->CopyToExportRegsIfNeeded(&Arg);
11294     }
11295   }
11296 
11297   if (!Chains.empty()) {
11298     Chains.push_back(NewRoot);
11299     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
11300   }
11301 
11302   DAG.setRoot(NewRoot);
11303 
11304   assert(i == InVals.size() && "Argument register count mismatch!");
11305 
11306   // If any argument copy elisions occurred and we have debug info, update the
11307   // stale frame indices used in the dbg.declare variable info table.
11308   if (!ArgCopyElisionFrameIndexMap.empty()) {
11309     for (MachineFunction::VariableDbgInfo &VI :
11310          MF->getInStackSlotVariableDbgInfo()) {
11311       auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
11312       if (I != ArgCopyElisionFrameIndexMap.end())
11313         VI.updateStackSlot(I->second);
11314     }
11315   }
11316 
11317   // Finally, if the target has anything special to do, allow it to do so.
11318   emitFunctionEntryCode();
11319 }
11320 
11321 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
11322 /// ensure constants are generated when needed.  Remember the virtual registers
11323 /// that need to be added to the Machine PHI nodes as input.  We cannot just
11324 /// directly add them, because expansion might result in multiple MBB's for one
11325 /// BB.  As such, the start of the BB might correspond to a different MBB than
11326 /// the end.
11327 void
11328 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
11329   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11330 
11331   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
11332 
11333   // Check PHI nodes in successors that expect a value to be available from this
11334   // block.
11335   for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
11336     if (!isa<PHINode>(SuccBB->begin())) continue;
11337     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
11338 
11339     // If this terminator has multiple identical successors (common for
11340     // switches), only handle each succ once.
11341     if (!SuccsHandled.insert(SuccMBB).second)
11342       continue;
11343 
11344     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
11345 
11346     // At this point we know that there is a 1-1 correspondence between LLVM PHI
11347     // nodes and Machine PHI nodes, but the incoming operands have not been
11348     // emitted yet.
11349     for (const PHINode &PN : SuccBB->phis()) {
11350       // Ignore dead phi's.
11351       if (PN.use_empty())
11352         continue;
11353 
11354       // Skip empty types
11355       if (PN.getType()->isEmptyTy())
11356         continue;
11357 
11358       unsigned Reg;
11359       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11360 
11361       if (const auto *C = dyn_cast<Constant>(PHIOp)) {
11362         unsigned &RegOut = ConstantsOut[C];
11363         if (RegOut == 0) {
11364           RegOut = FuncInfo.CreateRegs(C);
11365           // We need to zero/sign extend ConstantInt phi operands to match
11366           // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
11367           ISD::NodeType ExtendType = ISD::ANY_EXTEND;
11368           if (auto *CI = dyn_cast<ConstantInt>(C))
11369             ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
11370                                                     : ISD::ZERO_EXTEND;
11371           CopyValueToVirtualRegister(C, RegOut, ExtendType);
11372         }
11373         Reg = RegOut;
11374       } else {
11375         DenseMap<const Value *, Register>::iterator I =
11376           FuncInfo.ValueMap.find(PHIOp);
11377         if (I != FuncInfo.ValueMap.end())
11378           Reg = I->second;
11379         else {
11380           assert(isa<AllocaInst>(PHIOp) &&
11381                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
11382                  "Didn't codegen value into a register!??");
11383           Reg = FuncInfo.CreateRegs(PHIOp);
11384           CopyValueToVirtualRegister(PHIOp, Reg);
11385         }
11386       }
11387 
11388       // Remember that this register needs to added to the machine PHI node as
11389       // the input for this MBB.
11390       SmallVector<EVT, 4> ValueVTs;
11391       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
11392       for (EVT VT : ValueVTs) {
11393         const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
11394         for (unsigned i = 0; i != NumRegisters; ++i)
11395           FuncInfo.PHINodesToUpdate.push_back(
11396               std::make_pair(&*MBBI++, Reg + i));
11397         Reg += NumRegisters;
11398       }
11399     }
11400   }
11401 
11402   ConstantsOut.clear();
11403 }
11404 
11405 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
11406   MachineFunction::iterator I(MBB);
11407   if (++I == FuncInfo.MF->end())
11408     return nullptr;
11409   return &*I;
11410 }
11411 
11412 /// During lowering new call nodes can be created (such as memset, etc.).
11413 /// Those will become new roots of the current DAG, but complications arise
11414 /// when they are tail calls. In such cases, the call lowering will update
11415 /// the root, but the builder still needs to know that a tail call has been
11416 /// lowered in order to avoid generating an additional return.
11417 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
11418   // If the node is null, we do have a tail call.
11419   if (MaybeTC.getNode() != nullptr)
11420     DAG.setRoot(MaybeTC);
11421   else
11422     HasTailCall = true;
11423 }
11424 
11425 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
11426                                         MachineBasicBlock *SwitchMBB,
11427                                         MachineBasicBlock *DefaultMBB) {
11428   MachineFunction *CurMF = FuncInfo.MF;
11429   MachineBasicBlock *NextMBB = nullptr;
11430   MachineFunction::iterator BBI(W.MBB);
11431   if (++BBI != FuncInfo.MF->end())
11432     NextMBB = &*BBI;
11433 
11434   unsigned Size = W.LastCluster - W.FirstCluster + 1;
11435 
11436   BranchProbabilityInfo *BPI = FuncInfo.BPI;
11437 
11438   if (Size == 2 && W.MBB == SwitchMBB) {
11439     // If any two of the cases has the same destination, and if one value
11440     // is the same as the other, but has one bit unset that the other has set,
11441     // use bit manipulation to do two compares at once.  For example:
11442     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
11443     // TODO: This could be extended to merge any 2 cases in switches with 3
11444     // cases.
11445     // TODO: Handle cases where W.CaseBB != SwitchBB.
11446     CaseCluster &Small = *W.FirstCluster;
11447     CaseCluster &Big = *W.LastCluster;
11448 
11449     if (Small.Low == Small.High && Big.Low == Big.High &&
11450         Small.MBB == Big.MBB) {
11451       const APInt &SmallValue = Small.Low->getValue();
11452       const APInt &BigValue = Big.Low->getValue();
11453 
11454       // Check that there is only one bit different.
11455       APInt CommonBit = BigValue ^ SmallValue;
11456       if (CommonBit.isPowerOf2()) {
11457         SDValue CondLHS = getValue(Cond);
11458         EVT VT = CondLHS.getValueType();
11459         SDLoc DL = getCurSDLoc();
11460 
11461         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
11462                                  DAG.getConstant(CommonBit, DL, VT));
11463         SDValue Cond = DAG.getSetCC(
11464             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
11465             ISD::SETEQ);
11466 
11467         // Update successor info.
11468         // Both Small and Big will jump to Small.BB, so we sum up the
11469         // probabilities.
11470         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
11471         if (BPI)
11472           addSuccessorWithProb(
11473               SwitchMBB, DefaultMBB,
11474               // The default destination is the first successor in IR.
11475               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
11476         else
11477           addSuccessorWithProb(SwitchMBB, DefaultMBB);
11478 
11479         // Insert the true branch.
11480         SDValue BrCond =
11481             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
11482                         DAG.getBasicBlock(Small.MBB));
11483         // Insert the false branch.
11484         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
11485                              DAG.getBasicBlock(DefaultMBB));
11486 
11487         DAG.setRoot(BrCond);
11488         return;
11489       }
11490     }
11491   }
11492 
11493   if (TM.getOptLevel() != CodeGenOptLevel::None) {
11494     // Here, we order cases by probability so the most likely case will be
11495     // checked first. However, two clusters can have the same probability in
11496     // which case their relative ordering is non-deterministic. So we use Low
11497     // as a tie-breaker as clusters are guaranteed to never overlap.
11498     llvm::sort(W.FirstCluster, W.LastCluster + 1,
11499                [](const CaseCluster &a, const CaseCluster &b) {
11500       return a.Prob != b.Prob ?
11501              a.Prob > b.Prob :
11502              a.Low->getValue().slt(b.Low->getValue());
11503     });
11504 
11505     // Rearrange the case blocks so that the last one falls through if possible
11506     // without changing the order of probabilities.
11507     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
11508       --I;
11509       if (I->Prob > W.LastCluster->Prob)
11510         break;
11511       if (I->Kind == CC_Range && I->MBB == NextMBB) {
11512         std::swap(*I, *W.LastCluster);
11513         break;
11514       }
11515     }
11516   }
11517 
11518   // Compute total probability.
11519   BranchProbability DefaultProb = W.DefaultProb;
11520   BranchProbability UnhandledProbs = DefaultProb;
11521   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
11522     UnhandledProbs += I->Prob;
11523 
11524   MachineBasicBlock *CurMBB = W.MBB;
11525   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
11526     bool FallthroughUnreachable = false;
11527     MachineBasicBlock *Fallthrough;
11528     if (I == W.LastCluster) {
11529       // For the last cluster, fall through to the default destination.
11530       Fallthrough = DefaultMBB;
11531       FallthroughUnreachable = isa<UnreachableInst>(
11532           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
11533     } else {
11534       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
11535       CurMF->insert(BBI, Fallthrough);
11536       // Put Cond in a virtual register to make it available from the new blocks.
11537       ExportFromCurrentBlock(Cond);
11538     }
11539     UnhandledProbs -= I->Prob;
11540 
11541     switch (I->Kind) {
11542       case CC_JumpTable: {
11543         // FIXME: Optimize away range check based on pivot comparisons.
11544         JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
11545         SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
11546 
11547         // The jump block hasn't been inserted yet; insert it here.
11548         MachineBasicBlock *JumpMBB = JT->MBB;
11549         CurMF->insert(BBI, JumpMBB);
11550 
11551         auto JumpProb = I->Prob;
11552         auto FallthroughProb = UnhandledProbs;
11553 
11554         // If the default statement is a target of the jump table, we evenly
11555         // distribute the default probability to successors of CurMBB. Also
11556         // update the probability on the edge from JumpMBB to Fallthrough.
11557         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
11558                                               SE = JumpMBB->succ_end();
11559              SI != SE; ++SI) {
11560           if (*SI == DefaultMBB) {
11561             JumpProb += DefaultProb / 2;
11562             FallthroughProb -= DefaultProb / 2;
11563             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
11564             JumpMBB->normalizeSuccProbs();
11565             break;
11566           }
11567         }
11568 
11569         // If the default clause is unreachable, propagate that knowledge into
11570         // JTH->FallthroughUnreachable which will use it to suppress the range
11571         // check.
11572         //
11573         // However, don't do this if we're doing branch target enforcement,
11574         // because a table branch _without_ a range check can be a tempting JOP
11575         // gadget - out-of-bounds inputs that are impossible in correct
11576         // execution become possible again if an attacker can influence the
11577         // control flow. So if an attacker doesn't already have a BTI bypass
11578         // available, we don't want them to be able to get one out of this
11579         // table branch.
11580         if (FallthroughUnreachable) {
11581           Function &CurFunc = CurMF->getFunction();
11582           bool HasBranchTargetEnforcement = false;
11583           if (CurFunc.hasFnAttribute("branch-target-enforcement")) {
11584             HasBranchTargetEnforcement =
11585                 CurFunc.getFnAttribute("branch-target-enforcement")
11586                     .getValueAsBool();
11587           } else {
11588             HasBranchTargetEnforcement =
11589                 CurMF->getMMI().getModule()->getModuleFlag(
11590                     "branch-target-enforcement");
11591           }
11592           if (!HasBranchTargetEnforcement)
11593             JTH->FallthroughUnreachable = true;
11594         }
11595 
11596         if (!JTH->FallthroughUnreachable)
11597           addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
11598         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
11599         CurMBB->normalizeSuccProbs();
11600 
11601         // The jump table header will be inserted in our current block, do the
11602         // range check, and fall through to our fallthrough block.
11603         JTH->HeaderBB = CurMBB;
11604         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
11605 
11606         // If we're in the right place, emit the jump table header right now.
11607         if (CurMBB == SwitchMBB) {
11608           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
11609           JTH->Emitted = true;
11610         }
11611         break;
11612       }
11613       case CC_BitTests: {
11614         // FIXME: Optimize away range check based on pivot comparisons.
11615         BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
11616 
11617         // The bit test blocks haven't been inserted yet; insert them here.
11618         for (BitTestCase &BTC : BTB->Cases)
11619           CurMF->insert(BBI, BTC.ThisBB);
11620 
11621         // Fill in fields of the BitTestBlock.
11622         BTB->Parent = CurMBB;
11623         BTB->Default = Fallthrough;
11624 
11625         BTB->DefaultProb = UnhandledProbs;
11626         // If the cases in bit test don't form a contiguous range, we evenly
11627         // distribute the probability on the edge to Fallthrough to two
11628         // successors of CurMBB.
11629         if (!BTB->ContiguousRange) {
11630           BTB->Prob += DefaultProb / 2;
11631           BTB->DefaultProb -= DefaultProb / 2;
11632         }
11633 
11634         if (FallthroughUnreachable)
11635           BTB->FallthroughUnreachable = true;
11636 
11637         // If we're in the right place, emit the bit test header right now.
11638         if (CurMBB == SwitchMBB) {
11639           visitBitTestHeader(*BTB, SwitchMBB);
11640           BTB->Emitted = true;
11641         }
11642         break;
11643       }
11644       case CC_Range: {
11645         const Value *RHS, *LHS, *MHS;
11646         ISD::CondCode CC;
11647         if (I->Low == I->High) {
11648           // Check Cond == I->Low.
11649           CC = ISD::SETEQ;
11650           LHS = Cond;
11651           RHS=I->Low;
11652           MHS = nullptr;
11653         } else {
11654           // Check I->Low <= Cond <= I->High.
11655           CC = ISD::SETLE;
11656           LHS = I->Low;
11657           MHS = Cond;
11658           RHS = I->High;
11659         }
11660 
11661         // If Fallthrough is unreachable, fold away the comparison.
11662         if (FallthroughUnreachable)
11663           CC = ISD::SETTRUE;
11664 
11665         // The false probability is the sum of all unhandled cases.
11666         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
11667                      getCurSDLoc(), I->Prob, UnhandledProbs);
11668 
11669         if (CurMBB == SwitchMBB)
11670           visitSwitchCase(CB, SwitchMBB);
11671         else
11672           SL->SwitchCases.push_back(CB);
11673 
11674         break;
11675       }
11676     }
11677     CurMBB = Fallthrough;
11678   }
11679 }
11680 
11681 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
11682                                         const SwitchWorkListItem &W,
11683                                         Value *Cond,
11684                                         MachineBasicBlock *SwitchMBB) {
11685   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
11686          "Clusters not sorted?");
11687   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
11688 
11689   auto [LastLeft, FirstRight, LeftProb, RightProb] =
11690       SL->computeSplitWorkItemInfo(W);
11691 
11692   // Use the first element on the right as pivot since we will make less-than
11693   // comparisons against it.
11694   CaseClusterIt PivotCluster = FirstRight;
11695   assert(PivotCluster > W.FirstCluster);
11696   assert(PivotCluster <= W.LastCluster);
11697 
11698   CaseClusterIt FirstLeft = W.FirstCluster;
11699   CaseClusterIt LastRight = W.LastCluster;
11700 
11701   const ConstantInt *Pivot = PivotCluster->Low;
11702 
11703   // New blocks will be inserted immediately after the current one.
11704   MachineFunction::iterator BBI(W.MBB);
11705   ++BBI;
11706 
11707   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
11708   // we can branch to its destination directly if it's squeezed exactly in
11709   // between the known lower bound and Pivot - 1.
11710   MachineBasicBlock *LeftMBB;
11711   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
11712       FirstLeft->Low == W.GE &&
11713       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
11714     LeftMBB = FirstLeft->MBB;
11715   } else {
11716     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11717     FuncInfo.MF->insert(BBI, LeftMBB);
11718     WorkList.push_back(
11719         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
11720     // Put Cond in a virtual register to make it available from the new blocks.
11721     ExportFromCurrentBlock(Cond);
11722   }
11723 
11724   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
11725   // single cluster, RHS.Low == Pivot, and we can branch to its destination
11726   // directly if RHS.High equals the current upper bound.
11727   MachineBasicBlock *RightMBB;
11728   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
11729       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
11730     RightMBB = FirstRight->MBB;
11731   } else {
11732     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11733     FuncInfo.MF->insert(BBI, RightMBB);
11734     WorkList.push_back(
11735         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
11736     // Put Cond in a virtual register to make it available from the new blocks.
11737     ExportFromCurrentBlock(Cond);
11738   }
11739 
11740   // Create the CaseBlock record that will be used to lower the branch.
11741   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
11742                getCurSDLoc(), LeftProb, RightProb);
11743 
11744   if (W.MBB == SwitchMBB)
11745     visitSwitchCase(CB, SwitchMBB);
11746   else
11747     SL->SwitchCases.push_back(CB);
11748 }
11749 
11750 // Scale CaseProb after peeling a case with the probablity of PeeledCaseProb
11751 // from the swith statement.
11752 static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
11753                                             BranchProbability PeeledCaseProb) {
11754   if (PeeledCaseProb == BranchProbability::getOne())
11755     return BranchProbability::getZero();
11756   BranchProbability SwitchProb = PeeledCaseProb.getCompl();
11757 
11758   uint32_t Numerator = CaseProb.getNumerator();
11759   uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
11760   return BranchProbability(Numerator, std::max(Numerator, Denominator));
11761 }
11762 
11763 // Try to peel the top probability case if it exceeds the threshold.
11764 // Return current MachineBasicBlock for the switch statement if the peeling
11765 // does not occur.
11766 // If the peeling is performed, return the newly created MachineBasicBlock
11767 // for the peeled switch statement. Also update Clusters to remove the peeled
11768 // case. PeeledCaseProb is the BranchProbability for the peeled case.
11769 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
11770     const SwitchInst &SI, CaseClusterVector &Clusters,
11771     BranchProbability &PeeledCaseProb) {
11772   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11773   // Don't perform if there is only one cluster or optimizing for size.
11774   if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
11775       TM.getOptLevel() == CodeGenOptLevel::None ||
11776       SwitchMBB->getParent()->getFunction().hasMinSize())
11777     return SwitchMBB;
11778 
11779   BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
11780   unsigned PeeledCaseIndex = 0;
11781   bool SwitchPeeled = false;
11782   for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
11783     CaseCluster &CC = Clusters[Index];
11784     if (CC.Prob < TopCaseProb)
11785       continue;
11786     TopCaseProb = CC.Prob;
11787     PeeledCaseIndex = Index;
11788     SwitchPeeled = true;
11789   }
11790   if (!SwitchPeeled)
11791     return SwitchMBB;
11792 
11793   LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
11794                     << TopCaseProb << "\n");
11795 
11796   // Record the MBB for the peeled switch statement.
11797   MachineFunction::iterator BBI(SwitchMBB);
11798   ++BBI;
11799   MachineBasicBlock *PeeledSwitchMBB =
11800       FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
11801   FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
11802 
11803   ExportFromCurrentBlock(SI.getCondition());
11804   auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
11805   SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
11806                           nullptr,   nullptr,      TopCaseProb.getCompl()};
11807   lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
11808 
11809   Clusters.erase(PeeledCaseIt);
11810   for (CaseCluster &CC : Clusters) {
11811     LLVM_DEBUG(
11812         dbgs() << "Scale the probablity for one cluster, before scaling: "
11813                << CC.Prob << "\n");
11814     CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
11815     LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
11816   }
11817   PeeledCaseProb = TopCaseProb;
11818   return PeeledSwitchMBB;
11819 }
11820 
11821 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
11822   // Extract cases from the switch.
11823   BranchProbabilityInfo *BPI = FuncInfo.BPI;
11824   CaseClusterVector Clusters;
11825   Clusters.reserve(SI.getNumCases());
11826   for (auto I : SI.cases()) {
11827     MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
11828     const ConstantInt *CaseVal = I.getCaseValue();
11829     BranchProbability Prob =
11830         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
11831             : BranchProbability(1, SI.getNumCases() + 1);
11832     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
11833   }
11834 
11835   MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
11836 
11837   // Cluster adjacent cases with the same destination. We do this at all
11838   // optimization levels because it's cheap to do and will make codegen faster
11839   // if there are many clusters.
11840   sortAndRangeify(Clusters);
11841 
11842   // The branch probablity of the peeled case.
11843   BranchProbability PeeledCaseProb = BranchProbability::getZero();
11844   MachineBasicBlock *PeeledSwitchMBB =
11845       peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
11846 
11847   // If there is only the default destination, jump there directly.
11848   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11849   if (Clusters.empty()) {
11850     assert(PeeledSwitchMBB == SwitchMBB);
11851     SwitchMBB->addSuccessor(DefaultMBB);
11852     if (DefaultMBB != NextBlock(SwitchMBB)) {
11853       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
11854                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
11855     }
11856     return;
11857   }
11858 
11859   SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
11860                      DAG.getBFI());
11861   SL->findBitTestClusters(Clusters, &SI);
11862 
11863   LLVM_DEBUG({
11864     dbgs() << "Case clusters: ";
11865     for (const CaseCluster &C : Clusters) {
11866       if (C.Kind == CC_JumpTable)
11867         dbgs() << "JT:";
11868       if (C.Kind == CC_BitTests)
11869         dbgs() << "BT:";
11870 
11871       C.Low->getValue().print(dbgs(), true);
11872       if (C.Low != C.High) {
11873         dbgs() << '-';
11874         C.High->getValue().print(dbgs(), true);
11875       }
11876       dbgs() << ' ';
11877     }
11878     dbgs() << '\n';
11879   });
11880 
11881   assert(!Clusters.empty());
11882   SwitchWorkList WorkList;
11883   CaseClusterIt First = Clusters.begin();
11884   CaseClusterIt Last = Clusters.end() - 1;
11885   auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
11886   // Scale the branchprobability for DefaultMBB if the peel occurs and
11887   // DefaultMBB is not replaced.
11888   if (PeeledCaseProb != BranchProbability::getZero() &&
11889       DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
11890     DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
11891   WorkList.push_back(
11892       {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
11893 
11894   while (!WorkList.empty()) {
11895     SwitchWorkListItem W = WorkList.pop_back_val();
11896     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
11897 
11898     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
11899         !DefaultMBB->getParent()->getFunction().hasMinSize()) {
11900       // For optimized builds, lower large range as a balanced binary tree.
11901       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
11902       continue;
11903     }
11904 
11905     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
11906   }
11907 }
11908 
11909 void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
11910   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11911   auto DL = getCurSDLoc();
11912   EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11913   setValue(&I, DAG.getStepVector(DL, ResultVT));
11914 }
11915 
11916 void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
11917   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11918   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11919 
11920   SDLoc DL = getCurSDLoc();
11921   SDValue V = getValue(I.getOperand(0));
11922   assert(VT == V.getValueType() && "Malformed vector.reverse!");
11923 
11924   if (VT.isScalableVector()) {
11925     setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
11926     return;
11927   }
11928 
11929   // Use VECTOR_SHUFFLE for the fixed-length vector
11930   // to maintain existing behavior.
11931   SmallVector<int, 8> Mask;
11932   unsigned NumElts = VT.getVectorMinNumElements();
11933   for (unsigned i = 0; i != NumElts; ++i)
11934     Mask.push_back(NumElts - 1 - i);
11935 
11936   setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
11937 }
11938 
11939 void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I) {
11940   auto DL = getCurSDLoc();
11941   SDValue InVec = getValue(I.getOperand(0));
11942   EVT OutVT =
11943       InVec.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
11944 
11945   unsigned OutNumElts = OutVT.getVectorMinNumElements();
11946 
11947   // ISD Node needs the input vectors split into two equal parts
11948   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11949                            DAG.getVectorIdxConstant(0, DL));
11950   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11951                            DAG.getVectorIdxConstant(OutNumElts, DL));
11952 
11953   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
11954   // legalisation and combines.
11955   if (OutVT.isFixedLengthVector()) {
11956     SDValue Even = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
11957                                         createStrideMask(0, 2, OutNumElts));
11958     SDValue Odd = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
11959                                        createStrideMask(1, 2, OutNumElts));
11960     SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
11961     setValue(&I, Res);
11962     return;
11963   }
11964 
11965   SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
11966                             DAG.getVTList(OutVT, OutVT), Lo, Hi);
11967   setValue(&I, Res);
11968 }
11969 
11970 void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I) {
11971   auto DL = getCurSDLoc();
11972   EVT InVT = getValue(I.getOperand(0)).getValueType();
11973   SDValue InVec0 = getValue(I.getOperand(0));
11974   SDValue InVec1 = getValue(I.getOperand(1));
11975   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11976   EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11977 
11978   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
11979   // legalisation and combines.
11980   if (OutVT.isFixedLengthVector()) {
11981     unsigned NumElts = InVT.getVectorMinNumElements();
11982     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVec0, InVec1);
11983     setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
11984                                       createInterleaveMask(NumElts, 2)));
11985     return;
11986   }
11987 
11988   SDValue Res = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
11989                             DAG.getVTList(InVT, InVT), InVec0, InVec1);
11990   Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Res.getValue(0),
11991                     Res.getValue(1));
11992   setValue(&I, Res);
11993 }
11994 
11995 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
11996   SmallVector<EVT, 4> ValueVTs;
11997   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
11998                   ValueVTs);
11999   unsigned NumValues = ValueVTs.size();
12000   if (NumValues == 0) return;
12001 
12002   SmallVector<SDValue, 4> Values(NumValues);
12003   SDValue Op = getValue(I.getOperand(0));
12004 
12005   for (unsigned i = 0; i != NumValues; ++i)
12006     Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
12007                             SDValue(Op.getNode(), Op.getResNo() + i));
12008 
12009   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12010                            DAG.getVTList(ValueVTs), Values));
12011 }
12012 
12013 void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
12014   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12015   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12016 
12017   SDLoc DL = getCurSDLoc();
12018   SDValue V1 = getValue(I.getOperand(0));
12019   SDValue V2 = getValue(I.getOperand(1));
12020   int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
12021 
12022   // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
12023   if (VT.isScalableVector()) {
12024     MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
12025     setValue(&I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
12026                              DAG.getConstant(Imm, DL, IdxVT)));
12027     return;
12028   }
12029 
12030   unsigned NumElts = VT.getVectorNumElements();
12031 
12032   uint64_t Idx = (NumElts + Imm) % NumElts;
12033 
12034   // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
12035   SmallVector<int, 8> Mask;
12036   for (unsigned i = 0; i < NumElts; ++i)
12037     Mask.push_back(Idx + i);
12038   setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
12039 }
12040 
12041 // Consider the following MIR after SelectionDAG, which produces output in
12042 // phyregs in the first case or virtregs in the second case.
12043 //
12044 // INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
12045 // %5:gr32 = COPY $ebx
12046 // %6:gr32 = COPY $edx
12047 // %1:gr32 = COPY %6:gr32
12048 // %0:gr32 = COPY %5:gr32
12049 //
12050 // INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
12051 // %1:gr32 = COPY %6:gr32
12052 // %0:gr32 = COPY %5:gr32
12053 //
12054 // Given %0, we'd like to return $ebx in the first case and %5 in the second.
12055 // Given %1, we'd like to return $edx in the first case and %6 in the second.
12056 //
12057 // If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
12058 // to a single virtreg (such as %0). The remaining outputs monotonically
12059 // increase in virtreg number from there. If a callbr has no outputs, then it
12060 // should not have a corresponding callbr landingpad; in fact, the callbr
12061 // landingpad would not even be able to refer to such a callbr.
12062 static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) {
12063   MachineInstr *MI = MRI.def_begin(Reg)->getParent();
12064   // There is definitely at least one copy.
12065   assert(MI->getOpcode() == TargetOpcode::COPY &&
12066          "start of copy chain MUST be COPY");
12067   Reg = MI->getOperand(1).getReg();
12068   MI = MRI.def_begin(Reg)->getParent();
12069   // There may be an optional second copy.
12070   if (MI->getOpcode() == TargetOpcode::COPY) {
12071     assert(Reg.isVirtual() && "expected COPY of virtual register");
12072     Reg = MI->getOperand(1).getReg();
12073     assert(Reg.isPhysical() && "expected COPY of physical register");
12074     MI = MRI.def_begin(Reg)->getParent();
12075   }
12076   // The start of the chain must be an INLINEASM_BR.
12077   assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12078          "end of copy chain MUST be INLINEASM_BR");
12079   return Reg;
12080 }
12081 
12082 // We must do this walk rather than the simpler
12083 //   setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
12084 // otherwise we will end up with copies of virtregs only valid along direct
12085 // edges.
12086 void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
12087   SmallVector<EVT, 8> ResultVTs;
12088   SmallVector<SDValue, 8> ResultValues;
12089   const auto *CBR =
12090       cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());
12091 
12092   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12093   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
12094   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
12095 
12096   unsigned InitialDef = FuncInfo.ValueMap[CBR];
12097   SDValue Chain = DAG.getRoot();
12098 
12099   // Re-parse the asm constraints string.
12100   TargetLowering::AsmOperandInfoVector TargetConstraints =
12101       TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
12102   for (auto &T : TargetConstraints) {
12103     SDISelAsmOperandInfo OpInfo(T);
12104     if (OpInfo.Type != InlineAsm::isOutput)
12105       continue;
12106 
12107     // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the
12108     // individual constraint.
12109     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
12110 
12111     switch (OpInfo.ConstraintType) {
12112     case TargetLowering::C_Register:
12113     case TargetLowering::C_RegisterClass: {
12114       // Fill in OpInfo.AssignedRegs.Regs.
12115       getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);
12116 
12117       // getRegistersForValue may produce 1 to many registers based on whether
12118       // the OpInfo.ConstraintVT is legal on the target or not.
12119       for (size_t i = 0, e = OpInfo.AssignedRegs.Regs.size(); i != e; ++i) {
12120         Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
12121         if (Register::isPhysicalRegister(OriginalDef))
12122           FuncInfo.MBB->addLiveIn(OriginalDef);
12123         // Update the assigned registers to use the original defs.
12124         OpInfo.AssignedRegs.Regs[i] = OriginalDef;
12125       }
12126 
12127       SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12128           DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
12129       ResultValues.push_back(V);
12130       ResultVTs.push_back(OpInfo.ConstraintVT);
12131       break;
12132     }
12133     case TargetLowering::C_Other: {
12134       SDValue Flag;
12135       SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
12136                                                   OpInfo, DAG);
12137       ++InitialDef;
12138       ResultValues.push_back(V);
12139       ResultVTs.push_back(OpInfo.ConstraintVT);
12140       break;
12141     }
12142     default:
12143       break;
12144     }
12145   }
12146   SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12147                           DAG.getVTList(ResultVTs), ResultValues);
12148   setValue(&I, V);
12149 }
12150