//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstddef>
#include <limits>
#include <optional>
#include <tuple>

using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<bool>
    InsertAssertAlign("insert-assert-align", cl::init(true),
                      cl::desc("Insert the experimental `assertalign` node."),
                      cl::ReallyHidden);

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will disable this "
             "optimization"));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      SDValue InChain,
                                      std::optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue
getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
                 unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
                 SDValue InChain,
                 std::optional<CallingConv::ID> CC = std::nullopt,
                 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
  // Let the target assemble the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
                                                   PartVT, ValueVT, CC))
    return Val;

  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  InChain, CC);

  assert(NumParts > 0 && "No parts to assemble!");
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = llvm::bit_floor(NumParts);
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, PartVT, HalfVT, V,
                              InChain);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, RoundParts / 2,
                              PartVT, HalfVT, V, InChain);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, InChain, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                         TLI.getShiftAmountTy(
                                             TotalVT, DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V,
                             InChain, CC);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp)
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType())) {

      SDValue NoChange =
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));

      if (DAG.getMachineFunction().getFunction().getAttributes().hasFnAttr(
              llvm::Attribute::StrictFP)) {
        return DAG.getNode(ISD::STRICT_FP_ROUND, DL,
                           DAG.getVTList(ValueVT, MVT::Other), InChain, Val,
                           NoChange);
      }

      return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val, NoChange);
    }

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}
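
// Illustrative sketch (informal, not normative): assembling an i96 value from
// three i32 parts on a little-endian target takes the non-power-of-2 path
// above roughly as:
//   RoundParts = bit_floor(3) = 2, RoundBits = 64, HalfVT = i32
//   Lo, Hi = BITCAST i32 Parts[0], Parts[1]
//   Val    = BUILD_PAIR i64 (Lo, Hi)              ; the power-of-2 prefix
//   Odd    = getCopyFromParts(Parts + 2, 1, ...)  ; the trailing i32 part
//   Val    = OR i96 (ZERO_EXTEND Val), (SHL (ANY_EXTEND Odd), 64)
// The combined value is then truncated, extended, or bitcast afterwards if it
// does not already match ValueVT (here TotalVT already equals ValueVT).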

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm())
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      SDValue InChain,
                                      std::optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.has_value();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1, PartVT, IntermediateVT,
                                  V, InChain, CallConv);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor, PartVT,
                                  IntermediateVT, V, InChain, CallConv);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        IntermediateVT.isVector()
            ? EVT::getVectorVT(
                  *DAG.getContext(), IntermediateVT.getScalarType(),
                  IntermediateVT.getVectorElementCount() * NumParts)
            : EVT::getVectorVT(*DAG.getContext(),
                               IntermediateVT.getScalarType(),
                               NumIntermediates);
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    // If the parts vector has more elements than the value vector, then we
    // have a vector widening case (e.g. <2 x float> -> <4 x float>).
    // Extract the elements we want.
    if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
      assert((PartEVT.getVectorElementCount().getKnownMinValue() >
              ValueVT.getVectorElementCount().getKnownMinValue()) &&
             (PartEVT.getVectorElementCount().isScalable() ==
              ValueVT.getVectorElementCount().isScalable()) &&
             "Cannot narrow, it would be a lossy transformation");
      PartEVT =
          EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
                           ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
                        DAG.getVectorIdxConstant(0, DL));
      if (PartEVT == ValueVT)
        return Val;
      if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

      // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    }

    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. If the
    // vectors are the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.bitsLT(PartEVT)) {
      const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      // Drop the extra bits.
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      return DAG.getBitcast(ValueVT, Val);
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
    unsigned ValueSize = ValueSVT.getSizeInBits();
    if (ValueSize == PartEVT.getSizeInBits()) {
      Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
    } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
      // It's possible a scalar floating point type gets softened to integer
      // and then promoted to a larger integer. If PartEVT is the larger
      // integer we need to truncate it and then bitcast to the FP type.
      assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      Val = DAG.getBitcast(ValueSVT, Val);
    } else {
      Val = ValueVT.isFloatingPoint()
                ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
    }
  }

  return DAG.getBuildVector(ValueVT, DL, Val);
}
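
// Informal example (not part of the original source): if ValueVT is
// <2 x float> but the single part arrives as <4 x float> (a widened
// register), the code above extracts the low subvector:
//   Val = EXTRACT_SUBVECTOR <2 x float> (<4 x float> Part), 0
// whereas an i8 part carrying a <1 x i1> value goes through the scalar
// truncate path and is rebuilt with BUILD_VECTOR at the end.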

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void
getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
               unsigned NumParts, MVT PartVT, const Value *V,
               std::optional<CallingConv::ID> CallConv = std::nullopt,
               ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  // Let the target split the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
                                      CallConv))
    return;
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  unsigned PartBits = PartVT.getSizeInBits();
  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = llvm::bit_floor(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
      DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}
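
// Rough worked example (illustrative only): copying an i64 value into two
// i32 parts takes the power-of-2 bisection path above:
//   Parts[0] = BITCAST i64 Val
//   Parts[1] = EXTRACT_ELEMENT i32 Parts[0], 1   ; high half
//   Parts[0] = EXTRACT_ELEMENT i32 Parts[0], 0   ; low half
// with the final std::reverse applied only on big-endian targets.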

static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
                                     const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  EVT PartEVT = PartVT.getVectorElementType();
  EVT ValueEVT = ValueVT.getVectorElementType();
  ElementCount PartNumElts = PartVT.getVectorElementCount();
  ElementCount ValueNumElts = ValueVT.getVectorElementCount();

  // We only support widening vectors with equivalent element types and
  // fixed/scalable properties. If a target needs to widen a fixed-length type
  // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
  if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
      PartNumElts.isScalable() != ValueNumElts.isScalable())
    return SDValue();

  // Special-case bf16 because some targets share its ABI with fp16.
  if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
    assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
           "Cannot widen to illegal type");
    Val = DAG.getNode(ISD::BITCAST, DL,
                      ValueVT.changeVectorElementType(MVT::f16), Val);
  } else if (PartEVT != ValueEVT) {
    return SDValue();
  }

  // Widening a scalable vector to another scalable vector is done by inserting
  // the vector into a larger undef one.
  if (PartNumElts.isScalable())
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                       Val, DAG.getVectorIdxConstant(0, DL));

  // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
  // undef elements.
  SmallVector<SDValue, 16> Ops;
  DAG.ExtractVectorElements(Val, Ops);
  SDValue EltUndef = DAG.getUNDEF(PartEVT);
  Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);

  // FIXME: Use CONCAT for 2x -> 4x.
  return DAG.getBuildVector(PartVT, DL, Ops);
}
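
// For instance (informal sketch): widening <2 x float> to a <4 x float>
// part type extracts both elements and pads with undef:
//   BUILD_VECTOR <4 x float> (x0, x1, undef, undef)
// Scalable vectors are instead widened with INSERT_SUBVECTOR into an undef
// vector, since their elements cannot be enumerated at compile time.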

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.has_value();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorElementCount() ==
                   ValueVT.getVectorElementCount()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else if (PartEVT.isVector() &&
               PartEVT.getVectorElementType() !=
                   ValueVT.getVectorElementType() &&
               TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
                   TargetLowering::TypeWidenVector) {
      // Combination of widening and promotion.
      EVT WidenVT =
          EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
                           PartVT.getVectorElementCount());
      SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
      Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
    } else {
      // Don't extract an integer from a float vector. This can happen if the
      // FP type gets softened to integer and then promoted. The promotion
      // prevents it from being picked up by the earlier bitcast case.
      if (ValueVT.getVectorElementCount().isScalar() &&
          (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
        // If we reach this condition and PartVT is FP, this means that
        // ValueVT is also FP and both have a different size, otherwise we
        // would have bitcasted them. Producing an EXTRACT_VECTOR_ELT here
        // would be invalid since that would mean the smaller FP type has to
        // be extended to the larger one.
        if (PartVT.isFloatingPoint()) {
          Val = DAG.getBitcast(ValueVT.getScalarType(), Val);
          Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
        } else
          Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
                            DAG.getVectorIdxConstant(0, DL));
      } else {
        uint64_t ValueSize = ValueVT.getFixedSizeInBits();
        assert(PartVT.getFixedSizeInBits() > ValueSize &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
        RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
         "Mixing scalable and fixed vectors when copying in parts");

  std::optional<ElementCount> DestEltCnt;

  if (IntermediateVT.isVector())
    DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
  else
    DestEltCnt = ElementCount::getFixed(NumIntermediates);

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);

  if (ValueVT == BuiltVectorTy) {
    // Nothing to do.
  } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
    // Bitconvert vector->vector case.
    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  } else {
    if (BuiltVectorTy.getVectorElementType().bitsGT(
            ValueVT.getVectorElementType())) {
      // Integer promotion.
      ValueVT = EVT::getVectorVT(*DAG.getContext(),
                                 BuiltVectorTy.getVectorElementType(),
                                 ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
    }

    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
      Val = Widened;
    }
  }

  assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      // This does something sensible for scalable vectors - see the
      // definition of EXTRACT_SUBVECTOR for further details.
      unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
    } else {
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
                           DAG.getVectorIdxConstant(i, DL));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each intermediate value
    // into legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}
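
// Worked example (hypothetical breakdown, for illustration): lowering a
// <8 x i16> value into four parts where getVectorTypeBreakdown returns
// IntermediateVT = <2 x i16> and NumIntermediates = 4 produces
//   Ops[i] = EXTRACT_SUBVECTOR <2 x i16> Val, i * 2   ; i = 0..3
// after which each Ops[i] is handed to getCopyToParts for its final
// register part.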

RegsForValue::RegsForValue(const SmallVector<Register, 4> &regs, MVT regvt,
                           EVT valuevt, std::optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, Register Reg, Type *Ty,
                           std::optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg = Reg.id() + NumRegs;
  }
}
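
// Sketch of the resulting bookkeeping (assuming a 32-bit target where i64
// is split into two i32 registers and f32 is legal): for Ty = {i64, float}
// this constructor yields roughly
//   ValueVTs = {i64, f32}, RegVTs = {i32, f32},
//   RegCount = {2, 1},     Regs = {Reg, Reg+1, Reg+2}
// so each IR value maps to a contiguous run of registers.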

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Glue, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Glue) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
        *Glue = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, Chain, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}
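
// Note on the assert nodes above (informal example): if live-out info proves
// that an i32 vreg has its top 24 bits zero, the copy is wrapped as
//   AssertZext i32 (CopyFromReg ...), ValueType:i8
// letting later combines treat the value as an already-zero-extended i8.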

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Glue,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the values' legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Glue) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
      *Glue = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Glue)
    // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
    // flagged to it. That is, the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  InlineAsm::Flag Flag(Code, Regs.size());
  if (HasMatching)
    Flag.setMatchingOp(MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag.setRegClass(RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind::Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    Register SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT RegisterVT = RegVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
                                           RegisterVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}
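
// Informal example: for a register-use operand covering two virtual i32
// registers, this pushes one i32 target constant encoding the operand kind,
// the register count, and (for untied virtual registers) the register class,
// followed by two Register nodes that the inline-asm node consumes verbatim.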

SmallVector<std::pair<Register, TypeSize>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<Register, TypeSize>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    TypeSize RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               AssumptionCache *ac,
                               const TargetLibraryInfo *li) {
  AA = aa;
  AC = ac;
  GFI = gfi;
  LibInfo = li;
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
  AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
      *DAG.getMachineFunction().getFunction().getParent());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add current root to PendingChains, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}
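
// Small example of the chaining behaviour (illustrative): with pending
// chains L1 and L2 and a non-entry root R, updateRoot produces
//   Root = TokenFactor(L1, L2, R)
// unless some pending chain already has R as its first (chain) operand, in
// which case R is not added a second time.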

SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}
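
// Summary of the three roots (informal): getMemoryRoot() orders only the
// pending loads; getRoot() additionally orders all pending constrained FP
// intrinsics; getControlRoot() further forces pending fpexcept.strict
// intrinsics (via PendingExports) to complete before any control transfer.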

void SelectionDAGBuilder::handleDebugDeclare(Value *Address,
                                             DILocalVariable *Variable,
                                             DIExpression *Expression,
                                             DebugLoc DL) {
  assert(Variable && "Missing variable");

  // Check if address has undef value.
  if (!Address || isa<UndefValue>(Address) ||
      (Address->use_empty() && !isa<Argument>(Address))) {
    LLVM_DEBUG(
        dbgs()
        << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
    return;
  }

  bool IsParameter = Variable->isParameter() || isa<Argument>(Address);

  SDValue &N = NodeMap[Address];
  if (!N.getNode() && isa<Argument>(Address))
    // Check unused arguments map.
    N = UnusedArgNodeMap[Address];
  SDDbgValue *SDV;
  if (N.getNode()) {
    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
      Address = BCI->getOperand(0);
    // Parameters are handled specially.
    auto *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
    if (IsParameter && FINode) {
      // Byval parameter. We have a frame index at this point.
      SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
                                      /*IsIndirect*/ true, DL, SDNodeOrder);
    } else if (isa<Argument>(Address)) {
      // Address is an argument, so try to emit its dbg value using
      // virtual register info from the FuncInfo.ValueMap.
      EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                               FuncArgumentDbgValueKind::Declare, N);
      return;
    } else {
      SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
                            true, DL, SDNodeOrder);
    }
    DAG.AddDbgValue(SDV, IsParameter);
  } else {
    // If Address is an argument then try to emit its dbg value using
    // virtual register info from the FuncInfo.ValueMap.
    if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                                  FuncArgumentDbgValueKind::Declare, N)) {
      LLVM_DEBUG(dbgs() << "dbg_declare: Dropping debug info"
                        << " (could not emit func-arg dbg_value)\n");
    }
  }
}
1228 
1229 void SelectionDAGBuilder::visitDbgInfo(const Instruction &I) {
1230   // Add SDDbgValue nodes for any var locs here. Do so before updating
1231   // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
1232   if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
1233     // Add SDDbgValue nodes for any var locs here. Do so before updating
1234     // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
1235     for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
1236          It != End; ++It) {
1237       auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1238       dropDanglingDebugInfo(Var, It->Expr);
1239       if (It->Values.isKillLocation(It->Expr)) {
1240         handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
1241         continue;
1242       }
1243       SmallVector<Value *> Values(It->Values.location_ops());
1244       if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
1245                             It->Values.hasArgList())) {
1246         SmallVector<Value *, 4> Vals(It->Values.location_ops());
1247         addDanglingDebugInfo(Vals,
1248                              FnVarLocs->getDILocalVariable(It->VariableID),
1249                              It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
1250       }
1251     }
1252   }
1253 
1254   // We must skip DbgVariableRecords if they've already been processed above as
1255   // we have just emitted the debug values resulting from assignment tracking
1256   // analysis, making any existing DbgVariableRecords redundant (and probably
1257   // less correct). We still need to process DbgLabelRecords. This does sink
1258   // DbgLabelRecords to the bottom of the group of debug records. That sholdn't
1259   // be important as it does so deterministcally and ordering between
1260   // DbgLabelRecords and DbgVariableRecords is immaterial (other than for MIR/IR
1261   // printing).
1262   bool SkipDbgVariableRecords = DAG.getFunctionVarLocs();
1263   // Check whether there is any debug-info attached to this instruction, in the
1264   // form of DbgRecord non-instruction debug-info records.
1265   for (DbgRecord &DR : I.getDbgRecordRange()) {
1266     if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
1267       assert(DLR->getLabel() && "Missing label");
1268       SDDbgLabel *SDV =
1269           DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1270       DAG.AddDbgLabel(SDV);
1271       continue;
1272     }
1273 
1274     if (SkipDbgVariableRecords)
1275       continue;
1276     DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
1277     DILocalVariable *Variable = DVR.getVariable();
1278     DIExpression *Expression = DVR.getExpression();
1279     dropDanglingDebugInfo(Variable, Expression);
1280 
1281     if (DVR.getType() == DbgVariableRecord::LocationType::Declare) {
1282       if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1283         continue;
1284       LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DVR
1285                         << "\n");
1286       handleDebugDeclare(DVR.getVariableLocationOp(0), Variable, Expression,
1287                          DVR.getDebugLoc());
1288       continue;
1289     }
1290 
1291     // A DbgVariableRecord with no locations is a kill location.
1292     SmallVector<Value *, 4> Values(DVR.location_ops());
1293     if (Values.empty()) {
1294       handleKillDebugValue(Variable, Expression, DVR.getDebugLoc(),
1295                            SDNodeOrder);
1296       continue;
1297     }
1298 
1299     // A DbgVariableRecord with an undef or absent location is also a kill
1300     // location.
1301     if (llvm::any_of(Values,
1302                      [](Value *V) { return !V || isa<UndefValue>(V); })) {
1303       handleKillDebugValue(Variable, Expression, DVR.getDebugLoc(),
1304                            SDNodeOrder);
1305       continue;
1306     }
1307 
1308     bool IsVariadic = DVR.hasArgList();
1309     if (!handleDebugValue(Values, Variable, Expression, DVR.getDebugLoc(),
1310                           SDNodeOrder, IsVariadic)) {
1311       addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
1312                            DVR.getDebugLoc(), SDNodeOrder);
1313     }
1314   }
1315 }
1316 
1317 void SelectionDAGBuilder::visit(const Instruction &I) {
1318   visitDbgInfo(I);
1319 
1320   // Set up outgoing PHI node register values before emitting the terminator.
1321   if (I.isTerminator()) {
1322     HandlePHINodesInSuccessorBlocks(I.getParent());
1323   }
1324 
1325   // Increase the SDNodeOrder if dealing with a non-debug instruction.
1326   if (!isa<DbgInfoIntrinsic>(I))
1327     ++SDNodeOrder;
1328 
1329   CurInst = &I;
1330 
1331   // Set inserted listener only if required.
1332   bool NodeInserted = false;
1333   std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1334   MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
1335   MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra);
1336   if (PCSectionsMD || MMRA) {
1337     InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1338         DAG, [&](SDNode *) { NodeInserted = true; });
1339   }
1340 
1341   visit(I.getOpcode(), I);
1342 
1343   if (!I.isTerminator() && !HasTailCall &&
1344       !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
1345     CopyToExportRegsIfNeeded(&I);
1346 
1347   // Handle metadata.
1348   if (PCSectionsMD || MMRA) {
1349     auto It = NodeMap.find(&I);
1350     if (It != NodeMap.end()) {
1351       if (PCSectionsMD)
1352         DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1353       if (MMRA)
1354         DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1355     } else if (NodeInserted) {
1356       // This should not happen; if it does, don't let it go unnoticed so we can
1357       // fix it. The relevant visit*() function is probably missing a setValue().
1358       errs() << "warning: losing !pcsections and/or !mmra metadata ["
1359              << I.getModule()->getName() << "]\n";
1360       LLVM_DEBUG(I.dump());
1361       assert(false);
1362     }
1363   }
1364 
1365   CurInst = nullptr;
1366 }
1367 
1368 void SelectionDAGBuilder::visitPHI(const PHINode &) {
1369   llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1370 }
1371 
1372 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1373   // Note: this doesn't use InstVisitor, because it has to work with
1374   // ConstantExpr's in addition to instructions.
1375   switch (Opcode) {
1376   default: llvm_unreachable("Unknown instruction type encountered!");
1377     // Build the switch statement using the Instruction.def file.
1378 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1379     case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1380 #include "llvm/IR/Instruction.def"
1381   }
1382 }
1383 
1384 static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
1385                                             DILocalVariable *Variable,
1386                                             DebugLoc DL, unsigned Order,
1387                                             SmallVectorImpl<Value *> &Values,
1388                                             DIExpression *Expression) {
1389   // For variadic dbg_values we will now insert an undef.
1390   // FIXME: We can potentially recover these!
1391   SmallVector<SDDbgOperand, 2> Locs;
1392   for (const Value *V : Values) {
1393     auto *Undef = UndefValue::get(V->getType());
1394     Locs.push_back(SDDbgOperand::fromConst(Undef));
1395   }
1396   SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
1397                                         /*IsIndirect=*/false, DL, Order,
1398                                         /*IsVariadic=*/true);
1399   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1400   return true;
1401 }
1402 
1403 void SelectionDAGBuilder::addDanglingDebugInfo(SmallVectorImpl<Value *> &Values,
1404                                                DILocalVariable *Var,
1405                                                DIExpression *Expr,
1406                                                bool IsVariadic, DebugLoc DL,
1407                                                unsigned Order) {
1408   if (IsVariadic) {
1409     handleDanglingVariadicDebugInfo(DAG, Var, DL, Order, Values, Expr);
1410     return;
1411   }
1412   // TODO: Dangling debug info will eventually either be resolved or produce
1413   // an Undef DBG_VALUE. However in the resolution case, a gap may appear
1414   // between the original dbg.value location and its resolved DBG_VALUE,
1415   // which we should ideally fill with an extra Undef DBG_VALUE.
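  // For example (illustrative IR):
  //   dbg.value(i32 %x, !"x", !DIExpression())   ; no SDNode for %x yet
  //   %x = add i32 %a, %b
  // The dbg.value dangles here and is only emitted once the SDNode for %x is
  // created and resolveDanglingDebugInfo runs.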
1416   assert(Values.size() == 1);
1417   DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);
1418 }
1419 
1420 void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1421                                                 const DIExpression *Expr) {
1422   auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1423     DIVariable *DanglingVariable = DDI.getVariable();
1424     DIExpression *DanglingExpr = DDI.getExpression();
1425     if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1426       LLVM_DEBUG(dbgs() << "Dropping dangling debug info for "
1427                         << printDDI(nullptr, DDI) << "\n");
1428       return true;
1429     }
1430     return false;
1431   };
1432 
1433   for (auto &DDIMI : DanglingDebugInfoMap) {
1434     DanglingDebugInfoVector &DDIV = DDIMI.second;
1435 
1436     // If debug info is to be dropped, run it through final checks to see
1437     // whether it can be salvaged.
1438     for (auto &DDI : DDIV)
1439       if (isMatchingDbgValue(DDI))
1440         salvageUnresolvedDbgValue(DDIMI.first, DDI);
1441 
1442     erase_if(DDIV, isMatchingDbgValue);
1443   }
1444 }
1445 
1446 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1447 // generate the debug data structures now that we've seen its definition.
1448 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1449                                                    SDValue Val) {
1450   auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1451   if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1452     return;
1453 
1454   DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1455   for (auto &DDI : DDIV) {
1456     DebugLoc DL = DDI.getDebugLoc();
1457     unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1458     unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1459     DILocalVariable *Variable = DDI.getVariable();
1460     DIExpression *Expr = DDI.getExpression();
1461     assert(Variable->isValidLocationForIntrinsic(DL) &&
1462            "Expected inlined-at fields to agree");
1463     SDDbgValue *SDV;
1464     if (Val.getNode()) {
1465       // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1466       // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1467       // we couldn't resolve it directly when examining the DbgValue intrinsic
1468       // in the first place we should not be more successful here). Unless we
1469       // have some test case that proves this to be correct, we should avoid
1470       // calling EmitFuncArgumentDbgValue here.
1471       if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
1472                                     FuncArgumentDbgValueKind::Value, Val)) {
1473         LLVM_DEBUG(dbgs() << "Resolve dangling debug info for "
1474                           << printDDI(V, DDI) << "\n");
1475         LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
1476         // Increase the SDNodeOrder for the DbgValue here to make sure it is
1477         // inserted after the definition of Val when emitting the instructions
1478         // after ISel. An alternative could be to teach
1479         // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1480         LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1481                    << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1482                    << ValSDNodeOrder << "\n");
1483         SDV = getDbgValue(Val, Variable, Expr, DL,
1484                           std::max(DbgSDNodeOrder, ValSDNodeOrder));
1485         DAG.AddDbgValue(SDV, false);
1486       } else
1487         LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
1488                           << printDDI(V, DDI)
1489                           << " in EmitFuncArgumentDbgValue\n");
1490     } else {
1491       LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)
1492                         << "\n");
1493       auto Undef = UndefValue::get(V->getType());
1494       auto SDV =
1495           DAG.getConstantDbgValue(Variable, Expr, Undef, DL, DbgSDNodeOrder);
1496       DAG.AddDbgValue(SDV, false);
1497     }
1498   }
1499   DDIV.clear();
1500 }
1501 
1502 void SelectionDAGBuilder::salvageUnresolvedDbgValue(const Value *V,
1503                                                     DanglingDebugInfo &DDI) {
1504   // TODO: For the variadic implementation, instead of only checking the fail
1505   // state of `handleDebugValue`, we need to know specifically which values were
1506   // invalid, so that we attempt to salvage only those values when processing
1507   // a DIArgList.
1508   const Value *OrigV = V;
1509   DILocalVariable *Var = DDI.getVariable();
1510   DIExpression *Expr = DDI.getExpression();
1511   DebugLoc DL = DDI.getDebugLoc();
1512   unsigned SDOrder = DDI.getSDNodeOrder();
1513 
1514   // Currently we consider only dbg.value intrinsics -- we tell the salvager
1515   // that DW_OP_stack_value is desired.
1516   bool StackValue = true;
1517 
1518   // Can this Value be encoded without any further work?
1519   if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
1520     return;
1521 
1522   // Attempt to salvage back through as many instructions as possible. Bail if
1523   // a non-instruction is seen, such as a constant expression or global
1524   // variable. FIXME: Further work could recover those too.
1525   while (isa<Instruction>(V)) {
1526     const Instruction &VAsInst = *cast<const Instruction>(V);
1527     // Temporary "0", awaiting real implementation.
1528     SmallVector<uint64_t, 16> Ops;
1529     SmallVector<Value *, 4> AdditionalValues;
1530     V = salvageDebugInfoImpl(const_cast<Instruction &>(VAsInst),
1531                              Expr->getNumLocationOperands(), Ops,
1532                              AdditionalValues);
1533     // If we cannot salvage any further, and haven't yet found a suitable debug
1534     // expression, bail out.
1535     if (!V)
1536       break;
1537 
1538     // TODO: If AdditionalValues isn't empty, then the salvage can only be
1539     // represented with a DBG_VALUE_LIST, so we give up. When we have support
1540     // here for variadic dbg_values, remove that condition.
1541     if (!AdditionalValues.empty())
1542       break;
1543 
1544     // New value and expr now represent this debuginfo.
1545     Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1546 
1547     // Some kind of simplification occurred: check whether the operand of the
1548     // salvaged debug expression can be encoded in this DAG.
1549     if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
1550       LLVM_DEBUG(
1551           dbgs() << "Salvaged debug location info for:\n  " << *Var << "\n"
1552                  << *OrigV << "\nBy stripping back to:\n  " << *V << "\n");
1553       return;
1554     }
1555   }
1556 
1557   // This was the final opportunity to salvage this debug information, and it
1558   // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1559   // any earlier variable location.
1560   assert(OrigV && "V shouldn't be null");
1561   auto *Undef = UndefValue::get(OrigV->getType());
1562   auto *SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1563   DAG.AddDbgValue(SDV, false);
1564   LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  "
1565                     << printDDI(OrigV, DDI) << "\n");
1566 }
1567 
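/// Emit a kill location for a variable: a poison value with an undef
/// expression, which terminates any earlier machine location range (for
/// example, when the variable's value has been optimized away).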
1568 void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var,
1569                                                DIExpression *Expr,
1570                                                DebugLoc DbgLoc,
1571                                                unsigned Order) {
1572   Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context));
1573   DIExpression *NewExpr =
1574       const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr));
1575   handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
1576                    /*IsVariadic*/ false);
1577 }
1578 
1579 bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
1580                                            DILocalVariable *Var,
1581                                            DIExpression *Expr, DebugLoc DbgLoc,
1582                                            unsigned Order, bool IsVariadic) {
1583   if (Values.empty())
1584     return true;
1585 
1586   // Filter EntryValue locations out early.
1587   if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1588     return true;
1589 
1590   SmallVector<SDDbgOperand> LocationOps;
1591   SmallVector<SDNode *> Dependencies;
1592   for (const Value *V : Values) {
1593     // Constant value.
1594     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1595         isa<ConstantPointerNull>(V)) {
1596       LocationOps.emplace_back(SDDbgOperand::fromConst(V));
1597       continue;
1598     }
1599 
1600     // Look through IntToPtr constants.
1601     if (auto *CE = dyn_cast<ConstantExpr>(V))
1602       if (CE->getOpcode() == Instruction::IntToPtr) {
1603         LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
1604         continue;
1605       }
1606 
1607     // If the Value is a frame index, we can create a FrameIndex debug value
1608     // without relying on the DAG at all.
1609     if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1610       auto SI = FuncInfo.StaticAllocaMap.find(AI);
1611       if (SI != FuncInfo.StaticAllocaMap.end()) {
1612         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1613         continue;
1614       }
1615     }
1616 
1617     // Do not use getValue() in here; we don't want to generate code at
1618     // this point if it hasn't been done yet.
1619     SDValue N = NodeMap[V];
1620     if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1621       N = UnusedArgNodeMap[V];
1622 
1623     if (N.getNode()) {
1624       // Only emit func arg dbg value for non-variadic dbg.values for now.
1625       if (!IsVariadic &&
1626           EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1627                                    FuncArgumentDbgValueKind::Value, N))
1628         return true;
1629       if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
1630         // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
1631         // describe stack slot locations.
1632         //
1633         // Consider "int x = 0; int *px = &x;". There are two kinds of
1634         // interesting debug values here after optimization:
1635         //
1636         //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
1637         //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1638         //
1639         // Both describe the direct values of their associated variables.
1640         Dependencies.push_back(N.getNode());
1641         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1642         continue;
1643       }
1644       LocationOps.emplace_back(
1645           SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1646       continue;
1647     }
1648 
1649     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1650     // Special rules apply for the first dbg.values of parameter variables in a
1651     // function. Identify them by the fact that they reference Argument Values,
1652     // that their variable is a parameter, and that they are not inlined from
1653     // another function. We need to let them dangle until they get an SDNode.
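    // For example (illustrative IR), in "define void @f(i32 %p)" a leading
    //   dbg.value(i32 %p, !"p", !DIExpression())
    // must dangle until the CopyFromReg node for %p exists.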
1654     bool IsParamOfFunc =
1655         isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1656     if (IsParamOfFunc)
1657       return false;
1658 
1659     // The value is not used in this block yet (or it would have an SDNode).
1660     // We still want the value to appear for the user if possible -- if it has
1661     // an associated VReg, we can refer to that instead.
1662     auto VMI = FuncInfo.ValueMap.find(V);
1663     if (VMI != FuncInfo.ValueMap.end()) {
1664       unsigned Reg = VMI->second;
1665       // If this is a PHI node, it may be split up into several MI PHI nodes
1666       // (in FunctionLoweringInfo::set).
1667       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1668                        V->getType(), std::nullopt);
1669       if (RFV.occupiesMultipleRegs()) {
1670         // FIXME: We could potentially support variadic dbg_values here.
1671         if (IsVariadic)
1672           return false;
1673         unsigned Offset = 0;
1674         unsigned BitsToDescribe = 0;
1675         if (auto VarSize = Var->getSizeInBits())
1676           BitsToDescribe = *VarSize;
1677         if (auto Fragment = Expr->getFragmentInfo())
1678           BitsToDescribe = Fragment->SizeInBits;
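        // E.g. an i64 variable split across two 32-bit registers is emitted as
        // two DBG_VALUEs whose fragment expressions cover bits [0, 32) and
        // [32, 64) respectively.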
1679         for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
1680           // Bail out if all bits are described already.
1681           if (Offset >= BitsToDescribe)
1682             break;
1683           // TODO: handle scalable vectors.
1684           unsigned RegisterSize = RegAndSize.second;
1685           unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1686                                       ? BitsToDescribe - Offset
1687                                       : RegisterSize;
1688           auto FragmentExpr = DIExpression::createFragmentExpression(
1689               Expr, Offset, FragmentSize);
1690           if (!FragmentExpr)
1691             continue;
1692           SDDbgValue *SDV = DAG.getVRegDbgValue(
1693               Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, Order);
1694           DAG.AddDbgValue(SDV, false);
1695           Offset += RegisterSize;
1696         }
1697         return true;
1698       }
1699       // We can use simple vreg locations for variadic dbg_values as well.
1700       LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1701       continue;
1702     }
1703     // We failed to create a SDDbgOperand for V.
1704     return false;
1705   }
1706 
1707   // We have created a SDDbgOperand for each Value in Values.
1708   assert(!LocationOps.empty());
1709   SDDbgValue *SDV =
1710       DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1711                           /*IsIndirect=*/false, DbgLoc, Order, IsVariadic);
1712   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1713   return true;
1714 }
1715 
1716 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1717   // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1718   for (auto &Pair : DanglingDebugInfoMap)
1719     for (auto &DDI : Pair.second)
1720       salvageUnresolvedDbgValue(const_cast<Value *>(Pair.first), DDI);
1721   clearDanglingDebugInfo();
1722 }
1723 
1724 /// getCopyFromRegs - If there was a virtual register allocated for the value V,
1725 /// emit a CopyFromReg of the specified type Ty. Return an empty SDValue() otherwise.
1726 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1727   DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1728   SDValue Result;
1729 
1730   if (It != FuncInfo.ValueMap.end()) {
1731     Register InReg = It->second;
1732 
1733     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1734                      DAG.getDataLayout(), InReg, Ty,
1735                      std::nullopt); // This is not an ABI copy.
1736     SDValue Chain = DAG.getEntryNode();
1737     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1738                                  V);
1739     resolveDanglingDebugInfo(V, Result);
1740   }
1741 
1742   return Result;
1743 }
1744 
1745 /// getValue - Return an SDValue for the given Value.
1746 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1747   // If we already have an SDValue for this value, use it. It's important
1748   // to do this first, so that we don't create a CopyFromReg if we already
1749   // have a regular SDValue.
1750   SDValue &N = NodeMap[V];
1751   if (N.getNode()) return N;
1752 
1753   // If there's a virtual register allocated and initialized for this
1754   // value, use it.
1755   if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1756     return copyFromReg;
1757 
1758   // Otherwise create a new SDValue and remember it.
1759   SDValue Val = getValueImpl(V);
1760   NodeMap[V] = Val;
1761   resolveDanglingDebugInfo(V, Val);
1762   return Val;
1763 }
1764 
1765 /// getNonRegisterValue - Return an SDValue for the given Value, but
1766 /// don't look in FuncInfo.ValueMap for a virtual register.
1767 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1768   // If we already have an SDValue for this value, use it.
1769   SDValue &N = NodeMap[V];
1770   if (N.getNode()) {
1771     if (isIntOrFPConstant(N)) {
1772       // Remove the debug location from the node as the node is about to be used
1773       // in a location which may differ from the original debug location.  This
1774       // is relevant to Constant and ConstantFP nodes because they can appear
1775       // as constant expressions inside PHI nodes.
1776       N->setDebugLoc(DebugLoc());
1777     }
1778     return N;
1779   }
1780 
1781   // Otherwise create a new SDValue and remember it.
1782   SDValue Val = getValueImpl(V);
1783   NodeMap[V] = Val;
1784   resolveDanglingDebugInfo(V, Val);
1785   return Val;
1786 }
1787 
1788 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1789 /// Create an SDValue for the given value.
1790 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1791   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1792 
1793   if (const Constant *C = dyn_cast<Constant>(V)) {
1794     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1795 
1796     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1797       return DAG.getConstant(*CI, getCurSDLoc(), VT);
1798 
1799     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1800       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1801 
1802     if (const ConstantPtrAuth *CPA = dyn_cast<ConstantPtrAuth>(C)) {
1803       return DAG.getNode(ISD::PtrAuthGlobalAddress, getCurSDLoc(), VT,
1804                          getValue(CPA->getPointer()), getValue(CPA->getKey()),
1805                          getValue(CPA->getAddrDiscriminator()),
1806                          getValue(CPA->getDiscriminator()));
1807     }
1808 
1809     if (isa<ConstantPointerNull>(C)) {
1810       unsigned AS = V->getType()->getPointerAddressSpace();
1811       return DAG.getConstant(0, getCurSDLoc(),
1812                              TLI.getPointerTy(DAG.getDataLayout(), AS));
1813     }
1814 
1815     if (match(C, m_VScale()))
1816       return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1817 
1818     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1819       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1820 
1821     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1822       return DAG.getUNDEF(VT);
1823 
1824     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1825       visit(CE->getOpcode(), *CE);
1826       SDValue N1 = NodeMap[V];
1827       assert(N1.getNode() && "visit didn't populate the NodeMap!");
1828       return N1;
1829     }
1830 
1831     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1832       SmallVector<SDValue, 4> Constants;
1833       for (const Use &U : C->operands()) {
1834         SDNode *Val = getValue(U).getNode();
1835         // If the operand is an empty aggregate, there are no values.
1836         if (!Val) continue;
1837         // Add each leaf value from the operand to the Constants list
1838         // to form a flattened list of all the values.
1839         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1840           Constants.push_back(SDValue(Val, i));
1841       }
1842 
1843       return DAG.getMergeValues(Constants, getCurSDLoc());
1844     }
1845 
1846     if (const ConstantDataSequential *CDS =
1847           dyn_cast<ConstantDataSequential>(C)) {
1848       SmallVector<SDValue, 4> Ops;
1849       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1850         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1851         // Add each leaf value from the element to the Ops list
1852         // to form a flattened list of all the values.
1853         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1854           Ops.push_back(SDValue(Val, i));
1855       }
1856 
1857       if (isa<ArrayType>(CDS->getType()))
1858         return DAG.getMergeValues(Ops, getCurSDLoc());
1859       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1860     }
1861 
1862     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1863       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1864              "Unknown struct or array constant!");
1865 
1866       SmallVector<EVT, 4> ValueVTs;
1867       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1868       unsigned NumElts = ValueVTs.size();
1869       if (NumElts == 0)
1870         return SDValue(); // empty struct
1871       SmallVector<SDValue, 4> Constants(NumElts);
1872       for (unsigned i = 0; i != NumElts; ++i) {
1873         EVT EltVT = ValueVTs[i];
1874         if (isa<UndefValue>(C))
1875           Constants[i] = DAG.getUNDEF(EltVT);
1876         else if (EltVT.isFloatingPoint())
1877           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1878         else
1879           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1880       }
1881 
1882       return DAG.getMergeValues(Constants, getCurSDLoc());
1883     }
1884 
1885     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1886       return DAG.getBlockAddress(BA, VT);
1887 
1888     if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1889       return getValue(Equiv->getGlobalValue());
1890 
1891     if (const auto *NC = dyn_cast<NoCFIValue>(C))
1892       return getValue(NC->getGlobalValue());
1893 
1894     if (VT == MVT::aarch64svcount) {
1895       assert(C->isNullValue() && "Can only zero this target type!");
1896       return DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT,
1897                          DAG.getConstant(0, getCurSDLoc(), MVT::nxv16i1));
1898     }
1899 
1900     VectorType *VecTy = cast<VectorType>(V->getType());
1901 
1902     // Now that we know the number and type of the elements, get that number of
1903     // elements into the Ops array based on what kind of constant it is.
1904     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1905       SmallVector<SDValue, 16> Ops;
1906       unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1907       for (unsigned i = 0; i != NumElements; ++i)
1908         Ops.push_back(getValue(CV->getOperand(i)));
1909 
1910       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1911     }
1912 
1913     if (isa<ConstantAggregateZero>(C)) {
1914       EVT EltVT =
1915           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1916 
1917       SDValue Op;
1918       if (EltVT.isFloatingPoint())
1919         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1920       else
1921         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1922 
1923       return NodeMap[V] = DAG.getSplat(VT, getCurSDLoc(), Op);
1924     }
1925 
1926     llvm_unreachable("Unknown vector constant");
1927   }
1928 
1929   // If this is a static alloca, generate it as the FrameIndex node instead
1930   // of as an address computation.
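  // E.g. a static "alloca i32" in the entry block lowers directly to a
  // FrameIndex node for its reserved stack slot; no address arithmetic is
  // emitted.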
1931   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1932     DenseMap<const AllocaInst*, int>::iterator SI =
1933       FuncInfo.StaticAllocaMap.find(AI);
1934     if (SI != FuncInfo.StaticAllocaMap.end())
1935       return DAG.getFrameIndex(
1936           SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
1937   }
1938 
1939   // If this is an instruction which fast-isel has deferred, select it now.
1940   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1941     Register InReg = FuncInfo.InitializeRegForValue(Inst);
1942 
1943     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1944                      Inst->getType(), std::nullopt);
1945     SDValue Chain = DAG.getEntryNode();
1946     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1947   }
1948 
1949   if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
1950     return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1951 
1952   if (const auto *BB = dyn_cast<BasicBlock>(V))
1953     return DAG.getBasicBlock(FuncInfo.getMBB(BB));
1954 
1955   llvm_unreachable("Can't get register for value!");
1956 }
1957 
1958 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1959   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1960   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1961   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1962   bool IsSEH = isAsynchronousEHPersonality(Pers);
1963   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1964   if (!IsSEH)
1965     CatchPadMBB->setIsEHScopeEntry();
1966   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1967   if (IsMSVCCXX || IsCoreCLR)
1968     CatchPadMBB->setIsEHFuncletEntry();
1969 }
1970 
1971 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1972   // Update machine-CFG edge.
1973   MachineBasicBlock *TargetMBB = FuncInfo.getMBB(I.getSuccessor());
1974   FuncInfo.MBB->addSuccessor(TargetMBB);
1975   TargetMBB->setIsEHCatchretTarget(true);
1976   DAG.getMachineFunction().setHasEHCatchret(true);
1977 
1978   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1979   bool IsSEH = isAsynchronousEHPersonality(Pers);
1980   if (IsSEH) {
1981     // If this is not a fall-through branch or optimizations are switched off,
1982     // emit the branch.
1983     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1984         TM.getOptLevel() == CodeGenOptLevel::None)
1985       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1986                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1987     return;
1988   }
1989 
1990   // Figure out the funclet membership for the catchret's successor.
1991   // This will be used by the FuncletLayout pass to determine how to order the
1992   // BBs.
1993   // A 'catchret' returns to the outer scope's color.
1994   Value *ParentPad = I.getCatchSwitchParentPad();
1995   const BasicBlock *SuccessorColor;
1996   if (isa<ConstantTokenNone>(ParentPad))
1997     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1998   else
1999     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
2000   assert(SuccessorColor && "No parent funclet for catchret!");
2001   MachineBasicBlock *SuccessorColorMBB = FuncInfo.getMBB(SuccessorColor);
2002   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
2003 
2004   // Create the terminator node.
2005   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
2006                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
2007                             DAG.getBasicBlock(SuccessorColorMBB));
2008   DAG.setRoot(Ret);
2009 }
2010 
2011 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
2012   // Don't emit any special code for the cleanuppad instruction. It just marks
2013   // the start of an EH scope/funclet.
2014   FuncInfo.MBB->setIsEHScopeEntry();
2015   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2016   if (Pers != EHPersonality::Wasm_CXX) {
2017     FuncInfo.MBB->setIsEHFuncletEntry();
2018     FuncInfo.MBB->setIsCleanupFuncletEntry();
2019   }
2020 }
2021 
2022 // In wasm EH, even though a catchpad may not catch an exception if a tag does
2023 // not match, it is OK to add only the first unwind destination catchpad to the
2024 // successors, because there will be at least one invoke instruction within the
2025 // catch scope that points to the next unwind destination, if one exists, so
2026 // CFGSort cannot mess up the BB sorting order.
2027 // (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
2028 // call within them, and catchpads only consisting of 'catch (...)' have a
2029 // '__cxa_end_catch' call within them, both of which generate invokes in case
2030 // the next unwind destination exists, i.e., the next unwind destination is not
2031 // the caller.)
2032 //
2033 // Having at most one EH pad successor is also simpler and helps later
2034 // transformations.
2035 //
2036 // For example,
2037 // current:
2038 //   invoke void @foo to ... unwind label %catch.dispatch
2039 // catch.dispatch:
2040 //   %0 = catchswitch within ... [label %catch.start] unwind label %next
2041 // catch.start:
2042 //   ...
2043 //   ... in this BB or some other child BB dominated by this BB there will be an
2044 //   invoke that points to 'next' BB as an unwind destination
2045 //
2046 // next: ; We don't need to add this to 'current' BB's successor
2047 //   ...
2048 static void findWasmUnwindDestinations(
2049     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2050     BranchProbability Prob,
2051     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2052         &UnwindDests) {
2053   while (EHPadBB) {
2054     const Instruction *Pad = EHPadBB->getFirstNonPHI();
2055     if (isa<CleanupPadInst>(Pad)) {
2056       // Stop on cleanup pads.
2057       UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2058       UnwindDests.back().first->setIsEHScopeEntry();
2059       break;
2060     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2061       // Add the catchpad handlers to the possible destinations. We don't
2062       // continue to the unwind destination of the catchswitch for wasm.
2063       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2064         UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
2065         UnwindDests.back().first->setIsEHScopeEntry();
2066       }
2067       break;
2068     } else {
2069       continue;
2070     }
2071   }
2072 }
2073 
2074 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
2075 /// many places it could ultimately go. In the IR, we have a single unwind
2076 /// destination, but in the machine CFG, we enumerate all the possible blocks.
2077 /// This function skips over imaginary basic blocks that hold catchswitch
2078 /// instructions, and finds all the "real" machine
2079 /// basic block destinations. As those destinations may not be successors of
2080 /// EHPadBB, here we also calculate the edge probability to those destinations.
2081 /// The passed-in Prob is the edge probability to EHPadBB.
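/// For example, an invoke unwinding to a catchswitch with two catchpad
/// handlers yields both handler blocks as destinations; if the catchswitch
/// itself has an unwind destination, the walk continues there with Prob
/// scaled by that unwind edge's probability.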
2082 static void findUnwindDestinations(
2083     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2084     BranchProbability Prob,
2085     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2086         &UnwindDests) {
2087   EHPersonality Personality =
2088     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2089   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2090   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2091   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2092   bool IsSEH = isAsynchronousEHPersonality(Personality);
2093 
2094   if (IsWasmCXX) {
2095     findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
2096     assert(UnwindDests.size() <= 1 &&
2097            "There should be at most one unwind destination for wasm");
2098     return;
2099   }
2100 
2101   while (EHPadBB) {
2102     const Instruction *Pad = EHPadBB->getFirstNonPHI();
2103     BasicBlock *NewEHPadBB = nullptr;
2104     if (isa<LandingPadInst>(Pad)) {
2105       // Stop on landingpads. They are not funclets.
2106       UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2107       break;
2108     } else if (isa<CleanupPadInst>(Pad)) {
2109       // Stop on cleanup pads. Cleanups are always funclet entries for all known
2110       // personalities.
2111       UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2112       UnwindDests.back().first->setIsEHScopeEntry();
2113       UnwindDests.back().first->setIsEHFuncletEntry();
2114       break;
2115     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2116       // Add the catchpad handlers to the possible destinations.
2117       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2118         UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
2119         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2120         if (IsMSVCCXX || IsCoreCLR)
2121           UnwindDests.back().first->setIsEHFuncletEntry();
2122         if (!IsSEH)
2123           UnwindDests.back().first->setIsEHScopeEntry();
2124       }
2125       NewEHPadBB = CatchSwitch->getUnwindDest();
2126     } else {
2127       continue;
2128     }
2129 
2130     BranchProbabilityInfo *BPI = FuncInfo.BPI;
2131     if (BPI && NewEHPadBB)
2132       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2133     EHPadBB = NewEHPadBB;
2134   }
2135 }
2136 
2137 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
2138   // Update successor info.
2139   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2140   auto UnwindDest = I.getUnwindDest();
2141   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2142   BranchProbability UnwindDestProb =
2143       (BPI && UnwindDest)
2144           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
2145           : BranchProbability::getZero();
2146   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
2147   for (auto &UnwindDest : UnwindDests) {
2148     UnwindDest.first->setIsEHPad();
2149     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2150   }
2151   FuncInfo.MBB->normalizeSuccProbs();
2152 
2153   // Create the terminator node.
2154   MachineBasicBlock *CleanupPadMBB =
2155       FuncInfo.getMBB(I.getCleanupPad()->getParent());
2156   SDValue Ret = DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other,
2157                             getControlRoot(), DAG.getBasicBlock(CleanupPadMBB));
2158   DAG.setRoot(Ret);
2159 }
2160 
2161 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
2162   report_fatal_error("visitCatchSwitch not yet implemented!");
2163 }
2164 
2165 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
2166   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2167   auto &DL = DAG.getDataLayout();
2168   SDValue Chain = getControlRoot();
2169   SmallVector<ISD::OutputArg, 8> Outs;
2170   SmallVector<SDValue, 8> OutVals;
2171 
2172   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
2173   // lower
2174   //
2175   //   %val = call <ty> @llvm.experimental.deoptimize()
2176   //   ret <ty> %val
2177   //
2178   // differently.
2179   if (I.getParent()->getTerminatingDeoptimizeCall()) {
2180     LowerDeoptimizingReturn();
2181     return;
2182   }
2183 
2184   if (!FuncInfo.CanLowerReturn) {
2185     Register DemoteReg = FuncInfo.DemoteRegister;
2186     const Function *F = I.getParent()->getParent();
2187 
2188     // Emit a store of the return value through the virtual register.
2189     // Leave Outs empty so that LowerReturn won't try to load return
2190     // registers the usual way.
2191     SmallVector<EVT, 1> PtrValueVTs;
2192     ComputeValueVTs(TLI, DL,
2193                     PointerType::get(F->getContext(),
2194                                      DAG.getDataLayout().getAllocaAddrSpace()),
2195                     PtrValueVTs);
2196 
2197     SDValue RetPtr =
2198         DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVTs[0]);
2199     SDValue RetOp = getValue(I.getOperand(0));
2200 
2201     SmallVector<EVT, 4> ValueVTs, MemVTs;
2202     SmallVector<uint64_t, 4> Offsets;
2203     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2204                     &Offsets, 0);
2205     unsigned NumValues = ValueVTs.size();
2206 
2207     SmallVector<SDValue, 4> Chains(NumValues);
2208     Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2209     for (unsigned i = 0; i != NumValues; ++i) {
2210       // An aggregate return value cannot wrap around the address space, so
2211       // offsets to its parts don't wrap either.
2212       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
2213                                            TypeSize::getFixed(Offsets[i]));
2214 
2215       SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
2216       if (MemVTs[i] != ValueVTs[i])
2217         Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
2218       Chains[i] = DAG.getStore(
2219           Chain, getCurSDLoc(), Val,
2220           // FIXME: better loc info would be nice.
2221           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
2222           commonAlignment(BaseAlign, Offsets[i]));
2223     }
2224 
2225     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
2226                         MVT::Other, Chains);
2227   } else if (I.getNumOperands() != 0) {
2228     SmallVector<EVT, 4> ValueVTs;
2229     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
2230     unsigned NumValues = ValueVTs.size();
2231     if (NumValues) {
2232       SDValue RetOp = getValue(I.getOperand(0));
2233 
2234       const Function *F = I.getParent()->getParent();
2235 
2236       bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
2237           I.getOperand(0)->getType(), F->getCallingConv(),
2238           /*IsVarArg*/ false, DL);
2239 
2240       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2241       if (F->getAttributes().hasRetAttr(Attribute::SExt))
2242         ExtendKind = ISD::SIGN_EXTEND;
2243       else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2244         ExtendKind = ISD::ZERO_EXTEND;
2245 
2246       LLVMContext &Context = F->getContext();
2247       bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2248 
2249       for (unsigned j = 0; j != NumValues; ++j) {
2250         EVT VT = ValueVTs[j];
2251 
2252         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2253           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
2254 
2255         CallingConv::ID CC = F->getCallingConv();
2256 
2257         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
2258         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
2259         SmallVector<SDValue, 4> Parts(NumParts);
2260         getCopyToParts(DAG, getCurSDLoc(),
2261                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
2262                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
2263 
2264         // 'inreg' on the function refers to the return value.
2265         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2266         if (RetInReg)
2267           Flags.setInReg();
2268 
2269         if (I.getOperand(0)->getType()->isPointerTy()) {
2270           Flags.setPointer();
2271           Flags.setPointerAddrSpace(
2272               cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2273         }
2274 
2275         if (NeedsRegBlock) {
2276           Flags.setInConsecutiveRegs();
2277           if (j == NumValues - 1)
2278             Flags.setInConsecutiveRegsLast();
2279         }
2280 
2281         // Propagate extension type if any
2282         if (ExtendKind == ISD::SIGN_EXTEND)
2283           Flags.setSExt();
2284         else if (ExtendKind == ISD::ZERO_EXTEND)
2285           Flags.setZExt();
2286         else if (F->getAttributes().hasRetAttr(Attribute::NoExt))
2287           Flags.setNoExt();
2288 
2289         for (unsigned i = 0; i < NumParts; ++i) {
2290           Outs.push_back(ISD::OutputArg(Flags,
2291                                         Parts[i].getValueType().getSimpleVT(),
2292                                         VT, /*isfixed=*/true, 0, 0));
2293           OutVals.push_back(Parts[i]);
2294         }
2295       }
2296     }
2297   }
2298 
2299   // Push the swifterror virtual register in as the last element of Outs. This
2300   // makes sure the swifterror virtual register will be returned in the
2301   // swifterror physical register.
2302   const Function *F = I.getParent()->getParent();
2303   if (TLI.supportSwiftError() &&
2304       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2305     assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2306     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2307     Flags.setSwiftError();
2308     Outs.push_back(ISD::OutputArg(
2309         Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
2310         /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
2311     // Create SDNode for the swifterror virtual register.
2312     OutVals.push_back(
2313         DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2314                             &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2315                         EVT(TLI.getPointerTy(DL))));
2316   }
2317 
2318   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2319   CallingConv::ID CallConv =
2320     DAG.getMachineFunction().getFunction().getCallingConv();
2321   Chain = DAG.getTargetLoweringInfo().LowerReturn(
2322       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2323 
2324   // Verify that the target's LowerReturn behaved as expected.
2325   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2326          "LowerReturn didn't return a valid chain!");
2327 
2328   // Update the DAG with the new chain value resulting from return lowering.
2329   DAG.setRoot(Chain);
2330 }
2331 
2332 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
2333 /// created for it, emit nodes to copy the value into the virtual
2334 /// registers.
2335 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
2336   // Skip empty types
2337   if (V->getType()->isEmptyTy())
2338     return;
2339 
2340   DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
2341   if (VMI != FuncInfo.ValueMap.end()) {
2342     assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2343            "Unused value assigned virtual registers!");
2344     CopyValueToVirtualRegister(V, VMI->second);
2345   }
2346 }
2347 
2348 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
2349 /// the current basic block, add it to ValueMap now so that we'll get a
2350 /// CopyTo/FromReg.
2351 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
2352   // No need to export constants.
2353   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2354 
2355   // Already exported?
2356   if (FuncInfo.isExportedInst(V)) return;
2357 
2358   Register Reg = FuncInfo.InitializeRegForValue(V);
2359   CopyValueToVirtualRegister(V, Reg);
2360 }
2361 
2362 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
2363                                                      const BasicBlock *FromBB) {
2364   // The operands of the setcc have to be in this block.  We don't know
2365   // how to export them from some other block.
2366   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2367     // Can export from current BB.
2368     if (VI->getParent() == FromBB)
2369       return true;
2370 
2371     // Is already exported, noop.
2372     return FuncInfo.isExportedInst(V);
2373   }
2374 
2375   // If this is an argument, we can export it if the BB is the entry block or
2376   // if it is already exported.
2377   if (isa<Argument>(V)) {
2378     if (FromBB->isEntryBlock())
2379       return true;
2380 
2381     // Otherwise, can only export this if it is already exported.
2382     return FuncInfo.isExportedInst(V);
2383   }
2384 
2385   // Otherwise, constants can always be exported.
2386   return true;
2387 }
2388 
2389 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2390 BranchProbability
2391 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2392                                         const MachineBasicBlock *Dst) const {
2393   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2394   const BasicBlock *SrcBB = Src->getBasicBlock();
2395   const BasicBlock *DstBB = Dst->getBasicBlock();
2396   if (!BPI) {
2397     // If BPI is not available, set the default probability as 1 / N, where N is
2398     // the number of successors.
2399     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2400     return BranchProbability(1, SuccSize);
2401   }
2402   return BPI->getEdgeProbability(SrcBB, DstBB);
2403 }
2404 
2405 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2406                                                MachineBasicBlock *Dst,
2407                                                BranchProbability Prob) {
2408   if (!FuncInfo.BPI)
2409     Src->addSuccessorWithoutProb(Dst);
2410   else {
2411     if (Prob.isUnknown())
2412       Prob = getEdgeProbability(Src, Dst);
2413     Src->addSuccessor(Dst, Prob);
2414   }
2415 }
2416 
2417 static bool InBlock(const Value *V, const BasicBlock *BB) {
2418   if (const Instruction *I = dyn_cast<Instruction>(V))
2419     return I->getParent() == BB;
2420   return true;
2421 }
2422 
2423 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2424 /// This function emits a branch and is used at the leaves of an OR or an
2425 /// AND operator tree.
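/// For example, lowering "br i1 (or (icmp ...), (icmp ...))" reaches here once
/// per icmp leaf, recording one CaseBlock per leaf in SL->SwitchCases so that
/// each test becomes its own conditional branch.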
2426 void
2427 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2428                                                   MachineBasicBlock *TBB,
2429                                                   MachineBasicBlock *FBB,
2430                                                   MachineBasicBlock *CurBB,
2431                                                   MachineBasicBlock *SwitchBB,
2432                                                   BranchProbability TProb,
2433                                                   BranchProbability FProb,
2434                                                   bool InvertCond) {
2435   const BasicBlock *BB = CurBB->getBasicBlock();
2436 
2437   // If the leaf of the tree is a comparison, merge the condition into
2438   // the caseblock.
2439   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2440     // The operands of the cmp have to be in this block.  We don't know
2441     // how to export them from some other block.  If this is the first block
2442     // of the sequence, no exporting is needed.
2443     if (CurBB == SwitchBB ||
2444         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2445          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2446       ISD::CondCode Condition;
2447       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2448         ICmpInst::Predicate Pred =
2449             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2450         Condition = getICmpCondCode(Pred);
2451       } else {
2452         const FCmpInst *FC = cast<FCmpInst>(Cond);
2453         FCmpInst::Predicate Pred =
2454             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2455         Condition = getFCmpCondCode(Pred);
2456         if (TM.Options.NoNaNsFPMath)
2457           Condition = getFCmpCodeWithoutNaN(Condition);
2458       }
2459 
2460       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2461                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2462       SL->SwitchCases.push_back(CB);
2463       return;
2464     }
2465   }
2466 
2467   // Create a CaseBlock record representing this branch.
2468   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2469   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2470                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2471   SL->SwitchCases.push_back(CB);
2472 }
2473 
2474 // Collect dependencies on V recursively. This is used for the cost analysis in
2475 // `shouldKeepJumpConditionsTogether`.
2476 static bool collectInstructionDeps(
2477     SmallMapVector<const Instruction *, bool, 8> *Deps, const Value *V,
2478     SmallMapVector<const Instruction *, bool, 8> *Necessary = nullptr,
2479     unsigned Depth = 0) {
2480   // Return false if we have an incomplete count.
2481   if (Depth >= SelectionDAG::MaxRecursionDepth)
2482     return false;
2483 
2484   auto *I = dyn_cast<Instruction>(V);
2485   if (I == nullptr)
2486     return true;
2487 
2488   if (Necessary != nullptr) {
2489     // This instruction is necessary for the other side of the condition so
2490     // don't count it.
2491     if (Necessary->contains(I))
2492       return true;
2493   }
2494 
2495   // Already added this dep.
2496   if (!Deps->try_emplace(I, false).second)
2497     return true;
2498 
2499   for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx)
2500     if (!collectInstructionDeps(Deps, I->getOperand(OpIdx), Necessary,
2501                                 Depth + 1))
2502       return false;
2503   return true;
2504 }
2505 
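// Decide whether, for a conditional branch on "and/or i1 %lhs, i1 %rhs", it is
// worth emitting a single combined branch (computing both sides) instead of
// splitting into two branches with an early out. The estimate weighs the
// target-provided cost parameters against the cost of the instructions that
// only the rhs condition needs.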
2506 bool SelectionDAGBuilder::shouldKeepJumpConditionsTogether(
2507     const FunctionLoweringInfo &FuncInfo, const BranchInst &I,
2508     Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs,
2509     TargetLoweringBase::CondMergingParams Params) const {
2510   if (I.getNumSuccessors() != 2)
2511     return false;
2512 
2513   if (!I.isConditional())
2514     return false;
2515 
2516   if (Params.BaseCost < 0)
2517     return false;
2518 
2519   // Baseline cost.
2520   InstructionCost CostThresh = Params.BaseCost;
2521 
2522   BranchProbabilityInfo *BPI = nullptr;
2523   if (Params.LikelyBias || Params.UnlikelyBias)
2524     BPI = FuncInfo.BPI;
2525   if (BPI != nullptr) {
2526     // See if we are either likely to get an early out or compute both lhs/rhs
2527     // of the condition.
2528     BasicBlock *IfFalse = I.getSuccessor(0);
2529     BasicBlock *IfTrue = I.getSuccessor(1);
2530 
2531     std::optional<bool> Likely;
2532     if (BPI->isEdgeHot(I.getParent(), IfTrue))
2533       Likely = true;
2534     else if (BPI->isEdgeHot(I.getParent(), IfFalse))
2535       Likely = false;
2536 
2537     if (Likely) {
2538       if (Opc == (*Likely ? Instruction::And : Instruction::Or))
2539         // It's likely we will have to compute both lhs and rhs of condition.
2540         CostThresh += Params.LikelyBias;
2541       else {
2542         if (Params.UnlikelyBias < 0)
2543           return false;
2544         // It's likely we will get an early out.
2545         CostThresh -= Params.UnlikelyBias;
2546       }
2547     }
2548   }
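       // At this point, with e.g. BaseCost = 2, LikelyBias = 1 and
       // UnlikelyBias = 1 (illustrative values only), a hot "compute both"
       // edge would have raised the threshold to 3, while a hot early-out
       // edge would have lowered it to 1.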
2549 
2550   if (CostThresh <= 0)
2551     return false;
2552 
2553   // Collect "all" instructions that the lhs condition depends on.
2554   // Use a map for stable iteration (to avoid the non-determinism of
2555   // SmallPtrSet iteration). The `bool` value is just a dummy.
2556   SmallMapVector<const Instruction *, bool, 8> LhsDeps, RhsDeps;
2557   collectInstructionDeps(&LhsDeps, Lhs);
2558   // Collect "all" instructions that the rhs condition depends on and that
2559   // are not already dependencies of lhs. This gives us an estimate of which
2560   // instructions we stand to save by splitting the condition.
2561   if (!collectInstructionDeps(&RhsDeps, Rhs, &LhsDeps))
2562     return false;
2563   // Add the compare instruction itself unless it's a dependency of the LHS.
2564   if (const auto *RhsI = dyn_cast<Instruction>(Rhs))
2565     if (!LhsDeps.contains(RhsI))
2566       RhsDeps.try_emplace(RhsI, false);
2567 
2568   const auto &TLI = DAG.getTargetLoweringInfo();
2569   const auto &TTI =
2570       TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
2571 
2572   InstructionCost CostOfIncluding = 0;
2573   // See if this instruction will need to be computed independently of
2574   // whether RHS is.
2575   Value *BrCond = I.getCondition();
2576   auto ShouldCountInsn = [&RhsDeps, &BrCond](const Instruction *Ins) {
2577     for (const auto *U : Ins->users()) {
2578       // If user is independent of RHS calculation we don't need to count it.
2579       if (auto *UIns = dyn_cast<Instruction>(U))
2580         if (UIns != BrCond && !RhsDeps.contains(UIns))
2581           return false;
2582     }
2583     return true;
2584   };
2585 
2586   // Prune instructions from RhsDeps that are dependencies of unrelated
2587   // instructions. The bound (SelectionDAG::MaxRecursionDepth) is fairly
2588   // arbitrary and just meant to cap how much time we spend in the pruning
2589   // loop. It's highly unlikely to come into effect.
2590   const unsigned MaxPruneIters = SelectionDAG::MaxRecursionDepth;
2591   // Stop after a certain point. No incorrectness from including too many
2592   // instructions.
2593   for (unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2594     const Instruction *ToDrop = nullptr;
2595     for (const auto &InsPair : RhsDeps) {
2596       if (!ShouldCountInsn(InsPair.first)) {
2597         ToDrop = InsPair.first;
2598         break;
2599       }
2600     }
2601     if (ToDrop == nullptr)
2602       break;
2603     RhsDeps.erase(ToDrop);
2604   }
2605 
2606   for (const auto &InsPair : RhsDeps) {
2607     // Finally accumulate latency that we can only attribute to computing the
2608     // RHS condition. Use latency because we are essentially trying to calculate
2609     // the cost of the dependency chain.
2610     // Possible TODO: We could try to estimate ILP and make this more precise.
2611     CostOfIncluding +=
2612         TTI.getInstructionCost(InsPair.first, TargetTransformInfo::TCK_Latency);
2613 
2614     if (CostOfIncluding > CostThresh)
2615       return false;
2616   }
2617   return true;
2618 }
2619 
2620 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2621                                                MachineBasicBlock *TBB,
2622                                                MachineBasicBlock *FBB,
2623                                                MachineBasicBlock *CurBB,
2624                                                MachineBasicBlock *SwitchBB,
2625                                                Instruction::BinaryOps Opc,
2626                                                BranchProbability TProb,
2627                                                BranchProbability FProb,
2628                                                bool InvertCond) {
2629   // Skip over a NOT that is not part of the tree, and remember to invert the
2630   // op and operands at the next level.
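       // For example, a one-use `xor (or A, B), true` recurses on (or A, B)
       // with InvertCond flipped, which causes the Or to be treated as an And
       // at the next level.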
2631   Value *NotCond;
2632   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2633       InBlock(NotCond, CurBB->getBasicBlock())) {
2634     FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2635                          !InvertCond);
2636     return;
2637   }
2638 
2639   const Instruction *BOp = dyn_cast<Instruction>(Cond);
2640   const Value *BOpOp0, *BOpOp1;
2641   // Compute the effective opcode for Cond, taking into account whether it needs
2642   // to be inverted, e.g.
2643   //   and (not (or A, B)), C
2644   // gets lowered as
2645   //   and (and (not A, not B), C)
2646   Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2647   if (BOp) {
2648     BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2649                ? Instruction::And
2650                : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2651                       ? Instruction::Or
2652                       : (Instruction::BinaryOps)0);
2653     if (InvertCond) {
2654       if (BOpc == Instruction::And)
2655         BOpc = Instruction::Or;
2656       else if (BOpc == Instruction::Or)
2657         BOpc = Instruction::And;
2658     }
2659   }
2660 
2661   // If this node is not part of the or/and tree, emit it as a branch.
2662   // Note that all nodes in the tree should have the same opcode.
2663   bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2664   if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2665       !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2666       !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2667     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2668                                  TProb, FProb, InvertCond);
2669     return;
2670   }
2671 
2672   //  Create TmpBB after CurBB.
2673   MachineFunction::iterator BBI(CurBB);
2674   MachineFunction &MF = DAG.getMachineFunction();
2675   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2676   CurBB->getParent()->insert(++BBI, TmpBB);
2677 
2678   if (Opc == Instruction::Or) {
2679     // Codegen X | Y as:
2680     // BB1:
2681     //   jmp_if_X TBB
2682     //   jmp TmpBB
2683     // TmpBB:
2684     //   jmp_if_Y TBB
2685     //   jmp FBB
2686     //
2687 
2688     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2689     // The requirement is that
2690     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2691     //     = TrueProb for original BB.
2692     // Assuming the original probabilities are A and B, one choice is to set
2693     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2694     // A/(1+B) and 2B/(1+B). This choice assumes that
2695     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2696     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2697     // TmpBB, but the math is more complicated.
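         // As a worked example (numbers for illustration only): with A = 3/8
         // and B = 5/8, BB1 gets {3/16, 13/16} and TmpBB gets the normalized
         // {3/13, 10/13}; then 3/16 + (13/16 * 3/13) = 3/8 = A, as required.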
2698 
2699     auto NewTrueProb = TProb / 2;
2700     auto NewFalseProb = TProb / 2 + FProb;
2701     // Emit the LHS condition.
2702     FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2703                          NewFalseProb, InvertCond);
2704 
2705     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2706     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2707     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2708     // Emit the RHS condition into TmpBB.
2709     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2710                          Probs[1], InvertCond);
2711   } else {
2712     assert(Opc == Instruction::And && "Unknown merge op!");
2713     // Codegen X & Y as:
2714     // BB1:
2715     //   jmp_if_X TmpBB
2716     //   jmp FBB
2717     // TmpBB:
2718     //   jmp_if_Y TBB
2719     //   jmp FBB
2720     //
2721     //  This requires creation of TmpBB after CurBB.
2722 
2723     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2724     // The requirement is that
2725     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2726     //     = FalseProb for original BB.
2727     // Assuming the original probabilities are A and B, one choice is to set
2728     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2729     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2730     // TrueProb for BB1 * FalseProb for TmpBB.
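         // As a worked example (numbers for illustration only): with A = 3/8
         // and B = 5/8, BB1 gets {11/16, 5/16} and TmpBB gets the normalized
         // {6/11, 5/11}; then 5/16 + (11/16 * 5/11) = 5/8 = B, as required.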
2731 
2732     auto NewTrueProb = TProb + FProb / 2;
2733     auto NewFalseProb = FProb / 2;
2734     // Emit the LHS condition.
2735     FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2736                          NewFalseProb, InvertCond);
2737 
2738     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2739     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2740     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2741     // Emit the RHS condition into TmpBB.
2742     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2743                          Probs[1], InvertCond);
2744   }
2745 }
2746 
2747 /// If the set of cases should be emitted as a series of branches, return true.
2748 /// If we should emit this as a bunch of and/or'd together conditions, return
2749 /// false.
2750 bool
2751 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2752   if (Cases.size() != 2) return true;
2753 
2754   // If this is two comparisons of the same values or'd or and'd together, they
2755   // will get folded into a single comparison, so don't emit two blocks.
2756   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2757        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2758       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2759        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2760     return false;
2761   }
2762 
2763   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2764   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2765   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2766       Cases[0].CC == Cases[1].CC &&
2767       isa<Constant>(Cases[0].CmpRHS) &&
2768       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2769     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2770       return false;
2771     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2772       return false;
2773   }
2774 
2775   return true;
2776 }
2777 
2778 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2779   MachineBasicBlock *BrMBB = FuncInfo.MBB;
2780 
2781   // Update machine-CFG edges.
2782   MachineBasicBlock *Succ0MBB = FuncInfo.getMBB(I.getSuccessor(0));
2783 
2784   if (I.isUnconditional()) {
2785     // Update machine-CFG edges.
2786     BrMBB->addSuccessor(Succ0MBB);
2787 
2788     // If this is not a fall-through branch or optimizations are switched off,
2789     // emit the branch.
2790     if (Succ0MBB != NextBlock(BrMBB) ||
2791         TM.getOptLevel() == CodeGenOptLevel::None) {
2792       auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2793                             getControlRoot(), DAG.getBasicBlock(Succ0MBB));
2794       setValue(&I, Br);
2795       DAG.setRoot(Br);
2796     }
2797 
2798     return;
2799   }
2800 
2801   // If this condition is one of the special cases we handle, do special stuff
2802   // now.
2803   const Value *CondVal = I.getCondition();
2804   MachineBasicBlock *Succ1MBB = FuncInfo.getMBB(I.getSuccessor(1));
2805 
2806   // If this is a series of conditions that are or'd or and'd together, emit
2807   // this as a sequence of branches instead of setcc's with and/or operations.
2808   // As long as jumps are not expensive (exceptions for multi-use logic ops,
2809   // unpredictable branches, and vector extracts because those jumps are likely
2810   // expensive for any target), this should improve performance.
2811   // For example, instead of something like:
2812   //     cmp A, B
2813   //     C = seteq
2814   //     cmp D, E
2815   //     F = setle
2816   //     or C, F
2817   //     jnz foo
2818   // Emit:
2819   //     cmp A, B
2820   //     je foo
2821   //     cmp D, E
2822   //     jle foo
2823   bool IsUnpredictable = I.hasMetadata(LLVMContext::MD_unpredictable);
2824   const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2825   if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2826       BOp->hasOneUse() && !IsUnpredictable) {
2827     Value *Vec;
2828     const Value *BOp0, *BOp1;
2829     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2830     if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2831       Opcode = Instruction::And;
2832     else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2833       Opcode = Instruction::Or;
2834 
2835     if (Opcode &&
2836         !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2837           match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value()))) &&
2838         !shouldKeepJumpConditionsTogether(
2839             FuncInfo, I, Opcode, BOp0, BOp1,
2840             DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2841                 Opcode, BOp0, BOp1))) {
2842       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2843                            getEdgeProbability(BrMBB, Succ0MBB),
2844                            getEdgeProbability(BrMBB, Succ1MBB),
2845                            /*InvertCond=*/false);
2846       // If the compares in later blocks need to use values not currently
2847       // exported from this block, export them now.  This block should always
2848       // be the first entry.
2849       assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2850 
2851       // Allow some cases to be rejected.
2852       if (ShouldEmitAsBranches(SL->SwitchCases)) {
2853         for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2854           ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2855           ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2856         }
2857 
2858         // Emit the branch for this block.
2859         visitSwitchCase(SL->SwitchCases[0], BrMBB);
2860         SL->SwitchCases.erase(SL->SwitchCases.begin());
2861         return;
2862       }
2863 
2864       // Okay, we decided not to do this; remove any inserted MBBs and
2865       // clear SwitchCases.
2866       for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2867         FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2868 
2869       SL->SwitchCases.clear();
2870     }
2871   }
2872 
2873   // Create a CaseBlock record representing this branch.
2874   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2875                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc(),
2876                BranchProbability::getUnknown(), BranchProbability::getUnknown(),
2877                IsUnpredictable);
2878 
2879   // Use visitSwitchCase to actually insert the fast branch sequence for this
2880   // cond branch.
2881   visitSwitchCase(CB, BrMBB);
2882 }
2883 
2884 /// visitSwitchCase - Emits the necessary code to represent a single node in
2885 /// the binary search tree resulting from lowering a switch instruction.
2886 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2887                                           MachineBasicBlock *SwitchBB) {
2888   SDValue Cond;
2889   SDValue CondLHS = getValue(CB.CmpLHS);
2890   SDLoc dl = CB.DL;
2891 
2892   if (CB.CC == ISD::SETTRUE) {
2893     // Branch or fall through to TrueBB.
2894     addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2895     SwitchBB->normalizeSuccProbs();
2896     if (CB.TrueBB != NextBlock(SwitchBB)) {
2897       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2898                               DAG.getBasicBlock(CB.TrueBB)));
2899     }
2900     return;
2901   }
2902 
2903   auto &TLI = DAG.getTargetLoweringInfo();
2904   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2905 
2906   // Build the setcc now.
2907   if (!CB.CmpMHS) {
2908     // Fold "(X == true)" to X and "(X == false)" to !X to
2909     // handle common cases produced by branch lowering.
2910     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2911         CB.CC == ISD::SETEQ)
2912       Cond = CondLHS;
2913     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2914              CB.CC == ISD::SETEQ) {
2915       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2916       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2917     } else {
2918       SDValue CondRHS = getValue(CB.CmpRHS);
2919 
2920       // If a pointer's DAG type is larger than its memory type then the DAG
2921       // values are zero-extended. This breaks signed comparisons so truncate
2922       // back to the underlying type before doing the compare.
2923       if (CondLHS.getValueType() != MemVT) {
2924         CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2925         CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2926       }
2927       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2928     }
2929   } else {
2930     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
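         // A case range [Low, High] on the switch value is emitted as a single
         // unsigned check: e.g. the range [10, 20] on %x becomes
         // (%x - 10) ule 10. When Low is the minimum value, the subtraction is
         // skipped and a signed le against High is used instead.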
2931 
2932     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2933     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2934 
2935     SDValue CmpOp = getValue(CB.CmpMHS);
2936     EVT VT = CmpOp.getValueType();
2937 
2938     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2939       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2940                           ISD::SETLE);
2941     } else {
2942       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2943                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2944       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2945                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2946     }
2947   }
2948 
2949   // Update successor info
2950   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2951   // TrueBB and FalseBB are always different unless the incoming IR is
2952   // degenerate. This only happens when running llc on weird IR.
2953   if (CB.TrueBB != CB.FalseBB)
2954     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2955   SwitchBB->normalizeSuccProbs();
2956 
2957   // If the lhs block is the next block, invert the condition so that we can
2958   // fall through to the lhs instead of the rhs block.
2959   if (CB.TrueBB == NextBlock(SwitchBB)) {
2960     std::swap(CB.TrueBB, CB.FalseBB);
2961     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2962     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2963   }
2964 
2965   SDNodeFlags Flags;
2966   Flags.setUnpredictable(CB.IsUnpredictable);
2967   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, MVT::Other, getControlRoot(),
2968                                Cond, DAG.getBasicBlock(CB.TrueBB), Flags);
2969 
2970   setValue(CurInst, BrCond);
2971 
2972   // Insert the false branch. Do this even if it's a fall-through branch;
2973   // this makes it easier to do DAG optimizations that require inverting
2974   // the branch condition.
2975   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2976                        DAG.getBasicBlock(CB.FalseBB));
2977 
2978   DAG.setRoot(BrCond);
2979 }
2980 
2981 /// visitJumpTable - Emit the JumpTable node in the current MBB.
2982 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2983   // Emit the code for the jump table
2984   assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2985   assert(JT.Reg && "Should lower JT Header first!");
2986   EVT PTy = DAG.getTargetLoweringInfo().getJumpTableRegTy(DAG.getDataLayout());
2987   SDValue Index = DAG.getCopyFromReg(getControlRoot(), *JT.SL, JT.Reg, PTy);
2988   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2989   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
2990                                     Index.getValue(1), Table, Index);
2991   DAG.setRoot(BrJumpTable);
2992 }
2993 
2994 /// visitJumpTableHeader - This function emits the necessary code to produce
2995 /// the index into the JumpTable from the switch value.
2996 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2997                                                JumpTableHeader &JTH,
2998                                                MachineBasicBlock *SwitchBB) {
2999   assert(JT.SL && "Should set SDLoc for SelectionDAG!");
3000   const SDLoc &dl = *JT.SL;
3001 
3002   // Subtract the lowest switch case value from the value being switched on.
3003   SDValue SwitchOp = getValue(JTH.SValue);
3004   EVT VT = SwitchOp.getValueType();
3005   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
3006                             DAG.getConstant(JTH.First, dl, VT));
3007 
3008   // The SDNode we just created, which holds the value being switched on minus
3009   // the smallest case value, needs to be copied to a virtual register so it
3010   // can be used as an index into the jump table in a subsequent basic block.
3011   // This value may be smaller or larger than the target's pointer type, and
3012   // may therefore require extension or truncation.
3013   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3014   SwitchOp =
3015       DAG.getZExtOrTrunc(Sub, dl, TLI.getJumpTableRegTy(DAG.getDataLayout()));
3016 
3017   Register JumpTableReg =
3018       FuncInfo.CreateReg(TLI.getJumpTableRegTy(DAG.getDataLayout()));
3019   SDValue CopyTo =
3020       DAG.getCopyToReg(getControlRoot(), dl, JumpTableReg, SwitchOp);
3021   JT.Reg = JumpTableReg;
3022 
3023   if (!JTH.FallthroughUnreachable) {
3024     // Emit the range check for the jump table, and branch to the default block
3025     // for the switch statement if the value being switched on exceeds the
3026     // largest case in the switch.
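         // Since Sub = SValue - First, the single unsigned comparison
         // Sub ugt (Last - First) also catches values below First, which wrap
         // around to large unsigned numbers.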
3027     SDValue CMP = DAG.getSetCC(
3028         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
3029                                    Sub.getValueType()),
3030         Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
3031 
3032     SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
3033                                  MVT::Other, CopyTo, CMP,
3034                                  DAG.getBasicBlock(JT.Default));
3035 
3036     // Avoid emitting unnecessary branches to the next block.
3037     if (JT.MBB != NextBlock(SwitchBB))
3038       BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3039                            DAG.getBasicBlock(JT.MBB));
3040 
3041     DAG.setRoot(BrCond);
3042   } else {
3043     // Avoid emitting unnecessary branches to the next block.
3044     if (JT.MBB != NextBlock(SwitchBB))
3045       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
3046                               DAG.getBasicBlock(JT.MBB)));
3047     else
3048       DAG.setRoot(CopyTo);
3049   }
3050 }
3051 
3052 /// Create a LOAD_STACK_GUARD node, and let it carry the target-specific
3053 /// global variable if one exists.
3054 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
3055                                  SDValue &Chain) {
3056   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3057   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
3058   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
3059   MachineFunction &MF = DAG.getMachineFunction();
3060   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
3061   MachineSDNode *Node =
3062       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
3063   if (Global) {
3064     MachinePointerInfo MPInfo(Global);
3065     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
3066                  MachineMemOperand::MODereferenceable;
3067     MachineMemOperand *MemRef = MF.getMachineMemOperand(
3068         MPInfo, Flags, LocationSize::precise(PtrTy.getSizeInBits() / 8),
3069         DAG.getEVTAlign(PtrTy));
3070     DAG.setNodeMemRefs(Node, {MemRef});
3071   }
3072   if (PtrTy != PtrMemTy)
3073     return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
3074   return SDValue(Node, 0);
3075 }
3076 
3077 /// Codegen a new tail for a stack protector check ParentMBB which has had its
3078 /// tail spliced into a stack protector check success bb.
3079 ///
3080 /// For a high level explanation of how this fits into the stack protector
3081 /// generation see the comment on the declaration of class
3082 /// StackProtectorDescriptor.
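     ///
     /// When the target provides no guard check function, the emitted check
     /// is, in outline (a sketch, not exact DAG output):
     ///   %slot  = volatile load of the stack protector slot
     ///   %guard = LOAD_STACK_GUARD or a volatile load of the guard variable
     ///   brcond (setne %guard, %slot), FailureMBB
     ///   br SuccessMBB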
3083 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
3084                                                   MachineBasicBlock *ParentBB) {
3085 
3086   // First create the loads to the guard/stack slot for the comparison.
3087   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3088   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
3089   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
3090 
3091   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3092   int FI = MFI.getStackProtectorIndex();
3093 
3094   SDValue Guard;
3095   SDLoc dl = getCurSDLoc();
3096   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
3097   const Module &M = *ParentBB->getParent()->getFunction().getParent();
3098   Align Align =
3099       DAG.getDataLayout().getPrefTypeAlign(PointerType::get(M.getContext(), 0));
3100 
3101   // Generate code to load the content of the guard slot.
3102   SDValue GuardVal = DAG.getLoad(
3103       PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
3104       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
3105       MachineMemOperand::MOVolatile);
3106 
3107   if (TLI.useStackGuardXorFP())
3108     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
3109 
3110   // Retrieve guard check function, nullptr if instrumentation is inlined.
3111   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
3112     // The target provides a guard check function to validate the guard value.
3113     // Generate a call to that function with the content of the guard slot as
3114     // argument.
3115     FunctionType *FnTy = GuardCheckFn->getFunctionType();
3116     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3117 
3118     TargetLowering::ArgListTy Args;
3119     TargetLowering::ArgListEntry Entry;
3120     Entry.Node = GuardVal;
3121     Entry.Ty = FnTy->getParamType(0);
3122     if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3123       Entry.IsInReg = true;
3124     Args.push_back(Entry);
3125 
3126     TargetLowering::CallLoweringInfo CLI(DAG);
3127     CLI.setDebugLoc(getCurSDLoc())
3128         .setChain(DAG.getEntryNode())
3129         .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
3130                    getValue(GuardCheckFn), std::move(Args));
3131 
3132     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
3133     DAG.setRoot(Result.second);
3134     return;
3135   }
3136 
3137   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3138   // Otherwise, emit a volatile load to retrieve the stack guard value.
3139   SDValue Chain = DAG.getEntryNode();
3140   if (TLI.useLoadStackGuardNode(M)) {
3141     Guard = getLoadStackGuard(DAG, dl, Chain);
3142   } else {
3143     const Value *IRGuard = TLI.getSDagStackGuard(M);
3144     SDValue GuardPtr = getValue(IRGuard);
3145 
3146     Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3147                         MachinePointerInfo(IRGuard, 0), Align,
3148                         MachineMemOperand::MOVolatile);
3149   }
3150 
3151   // Perform the comparison via a setcc node.
3152   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
3153                                                         *DAG.getContext(),
3154                                                         Guard.getValueType()),
3155                              Guard, GuardVal, ISD::SETNE);
3156 
3157   // If the guard and stack slot are not equal, branch to the failure MBB.
3158   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
3159                                MVT::Other, GuardVal.getOperand(0),
3160                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
3161   // Otherwise branch to success MBB.
3162   SDValue Br = DAG.getNode(ISD::BR, dl,
3163                            MVT::Other, BrCond,
3164                            DAG.getBasicBlock(SPD.getSuccessMBB()));
3165 
3166   DAG.setRoot(Br);
3167 }
3168 
3169 /// Codegen the failure basic block for a stack protector check.
3170 ///
3171 /// A failure stack protector machine basic block consists simply of a call to
3172 /// __stack_chk_fail().
3173 ///
3174 /// For a high level explanation of how this fits into the stack protector
3175 /// generation see the comment on the declaration of class
3176 /// StackProtectorDescriptor.
3177 void
3178 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
3179   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3180   TargetLowering::MakeLibCallOptions CallOptions;
3181   CallOptions.setDiscardResult(true);
3182   SDValue Chain = TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL,
3183                                   MVT::isVoid, {}, CallOptions, getCurSDLoc())
3184                       .second;
3185 
3186   // Emit a trap instruction if we are required to do so.
3187   const TargetOptions &TargetOpts = DAG.getTarget().Options;
3188   if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
3189     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
3190 
3191   DAG.setRoot(Chain);
3192 }
3193 
3194 /// visitBitTestHeader - This function emits the necessary code to produce a
3195 /// value suitable for "bit tests".
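     /// The produced value is the switch operand rebased to the start of the
     /// case cluster (SValue - B.First), widened to the pointer type when the
     /// original type is illegal or too narrow to hold the case masks.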
3196 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
3197                                              MachineBasicBlock *SwitchBB) {
3198   SDLoc dl = getCurSDLoc();
3199 
3200   // Subtract the minimum value.
3201   SDValue SwitchOp = getValue(B.SValue);
3202   EVT VT = SwitchOp.getValueType();
3203   SDValue RangeSub =
3204       DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
3205 
3206   // Determine the type of the test operands.
3207   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3208   bool UsePtrType = false;
3209   if (!TLI.isTypeLegal(VT)) {
3210     UsePtrType = true;
3211   } else {
3212     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
3213       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
3214         // Switch table case ranges are encoded into a series of masks.
3215         // Just use the pointer type; it's guaranteed to fit.
3216         UsePtrType = true;
3217         break;
3218       }
3219   }
3220   SDValue Sub = RangeSub;
3221   if (UsePtrType) {
3222     VT = TLI.getPointerTy(DAG.getDataLayout());
3223     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
3224   }
3225 
3226   B.RegVT = VT.getSimpleVT();
3227   B.Reg = FuncInfo.CreateReg(B.RegVT);
3228   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
3229 
3230   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
3231 
3232   if (!B.FallthroughUnreachable)
3233     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
3234   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
3235   SwitchBB->normalizeSuccProbs();
3236 
3237   SDValue Root = CopyTo;
3238   if (!B.FallthroughUnreachable) {
3239     // Conditional branch to the default block.
3240     SDValue RangeCmp = DAG.getSetCC(dl,
3241         TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
3242                                RangeSub.getValueType()),
3243         RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
3244         ISD::SETUGT);
3245 
3246     Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3247                        DAG.getBasicBlock(B.Default));
3248   }
3249 
3250   // Avoid emitting unnecessary branches to the next block.
3251   if (MBB != NextBlock(SwitchBB))
3252     Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
3253 
3254   DAG.setRoot(Root);
3255 }
3256 
3257 /// visitBitTestCase - This function produces one "bit test".
3258 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
3259                                            MachineBasicBlock *NextMBB,
3260                                            BranchProbability BranchProbToNext,
3261                                            Register Reg, BitTestCase &B,
3262                                            MachineBasicBlock *SwitchBB) {
3263   SDLoc dl = getCurSDLoc();
3264   MVT VT = BB.RegVT;
3265   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
3266   SDValue Cmp;
3267   unsigned PopCount = llvm::popcount(B.Mask);
3268   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3269   if (PopCount == 1) {
3270     // Testing for a single bit; just compare the shift count with what it
3271     // would need to be to shift a 1 bit in that position.
3272     Cmp = DAG.getSetCC(
3273         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3274         ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
3275         ISD::SETEQ);
3276   } else if (PopCount == BB.Range) {
3277     // There is only one zero bit in the range; test for it directly.
3278     Cmp = DAG.getSetCC(
3279         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3280         ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
3281   } else {
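         // General case: test whether (1 << rebased value) overlaps the mask.
         // E.g., cases {0, 1, 3} of a cluster give Mask = 0b1011, so a rebased
         // value of 2 yields (1 << 2) & 0b1011 == 0, i.e. no match.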
3282     // Make desired shift
3283     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
3284                                     DAG.getConstant(1, dl, VT), ShiftOp);
3285 
3286     // Emit bit tests and jumps
3287     SDValue AndOp = DAG.getNode(ISD::AND, dl,
3288                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
3289     Cmp = DAG.getSetCC(
3290         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3291         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
3292   }
3293 
3294   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
3295   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
3296   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
3297   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3298   // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
3299   // one as they are relative probabilities (and thus work more like weights),
3300   // and hence we need to normalize them to let the sum of them become one.
3301   SwitchBB->normalizeSuccProbs();
3302 
3303   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
3304                               MVT::Other, getControlRoot(),
3305                               Cmp, DAG.getBasicBlock(B.TargetBB));
3306 
3307   // Avoid emitting unnecessary branches to the next block.
3308   if (NextMBB != NextBlock(SwitchBB))
3309     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3310                         DAG.getBasicBlock(NextMBB));
3311 
3312   DAG.setRoot(BrAnd);
3313 }
3314 
3315 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
3316   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
3317 
3318   // Retrieve successors. Look through artificial IR level blocks like
3319   // catchswitch for successors.
3320   MachineBasicBlock *Return = FuncInfo.getMBB(I.getSuccessor(0));
3321   const BasicBlock *EHPadBB = I.getSuccessor(1);
3322   MachineBasicBlock *EHPadMBB = FuncInfo.getMBB(EHPadBB);
3323 
3324   // Deopt and ptrauth bundles are lowered in helper functions, and we don't
3325   // have to do anything here to lower funclet bundles.
3326   assert(!I.hasOperandBundlesOtherThan(
3327              {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3328               LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3329               LLVMContext::OB_cfguardtarget, LLVMContext::OB_ptrauth,
3330               LLVMContext::OB_clang_arc_attachedcall}) &&
3331          "Cannot lower invokes with arbitrary operand bundles yet!");
3332 
3333   const Value *Callee(I.getCalledOperand());
3334   const Function *Fn = dyn_cast<Function>(Callee);
3335   if (isa<InlineAsm>(Callee))
3336     visitInlineAsm(I, EHPadBB);
3337   else if (Fn && Fn->isIntrinsic()) {
3338     switch (Fn->getIntrinsicID()) {
3339     default:
3340       llvm_unreachable("Cannot invoke this intrinsic");
3341     case Intrinsic::donothing:
3342       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
3343     case Intrinsic::seh_try_begin:
3344     case Intrinsic::seh_scope_begin:
3345     case Intrinsic::seh_try_end:
3346     case Intrinsic::seh_scope_end:
3347       if (EHPadMBB)
3348         // The block is referenced by the EH table, so the dtor funclet is
3349         // not removed by later optimizations.
3350         EHPadMBB->setMachineBlockAddressTaken();
3351       break;
3352     case Intrinsic::experimental_patchpoint_void:
3353     case Intrinsic::experimental_patchpoint:
3354       visitPatchpoint(I, EHPadBB);
3355       break;
3356     case Intrinsic::experimental_gc_statepoint:
3357       LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
3358       break;
3359     case Intrinsic::wasm_rethrow: {
3360       // This is usually done in visitTargetIntrinsic, but this intrinsic is
3361       // special because it can be invoked, so we manually lower it to a DAG
3362       // node here.
3363       SmallVector<SDValue, 8> Ops;
3364       Ops.push_back(getControlRoot()); // inchain for the terminator node
3365       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3366       Ops.push_back(
3367           DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
3368                                 TLI.getPointerTy(DAG.getDataLayout())));
3369       SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
3370       DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
3371       break;
3372     }
3373     }
3374   } else if (I.hasDeoptState()) {
3375     // Currently we do not lower any intrinsic calls with deopt operand bundles.
3376     // Eventually we will support lowering the @llvm.experimental.deoptimize
3377     // intrinsic, and right now there are no plans to support other intrinsics
3378     // with deopt state.
3379     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
3380   } else if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
3381     LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), EHPadBB);
3382   } else {
3383     LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
3384   }
3385 
3386   // If the value of the invoke is used outside of its defining block, make it
3387   // available as a virtual register.
3388   // We already took care of the exported value for the statepoint
3389   // instruction during the call to LowerStatepoint.
3390   if (!isa<GCStatepointInst>(I)) {
3391     CopyToExportRegsIfNeeded(&I);
3392   }
3393 
3394   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3395   BranchProbabilityInfo *BPI = FuncInfo.BPI;
3396   BranchProbability EHPadBBProb =
3397       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3398           : BranchProbability::getZero();
3399   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
3400 
3401   // Update successor info.
3402   addSuccessorWithProb(InvokeMBB, Return);
3403   for (auto &UnwindDest : UnwindDests) {
3404     UnwindDest.first->setIsEHPad();
3405     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3406   }
3407   InvokeMBB->normalizeSuccProbs();
3408 
3409   // Drop into normal successor.
3410   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
3411                           DAG.getBasicBlock(Return)));
3412 }
3413 
3414 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
3415   MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
3416 
3417   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3418   // have to do anything here to lower funclet bundles.
3419   assert(!I.hasOperandBundlesOtherThan(
3420              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3421          "Cannot lower callbrs with arbitrary operand bundles yet!");
3422 
3423   assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
3424   visitInlineAsm(I);
3425   CopyToExportRegsIfNeeded(&I);
3426 
3427   // Retrieve successors.
3428   SmallPtrSet<BasicBlock *, 8> Dests;
3429   Dests.insert(I.getDefaultDest());
3430   MachineBasicBlock *Return = FuncInfo.getMBB(I.getDefaultDest());
3431 
3432   // Update successor info.
3433   addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
3434   for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
3435     BasicBlock *Dest = I.getIndirectDest(i);
3436     MachineBasicBlock *Target = FuncInfo.getMBB(Dest);
3437     Target->setIsInlineAsmBrIndirectTarget();
3438     Target->setMachineBlockAddressTaken();
3439     Target->setLabelMustBeEmitted();
3440     // Don't add duplicate machine successors.
3441     if (Dests.insert(Dest).second)
3442       addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
3443   }
3444   CallBrMBB->normalizeSuccProbs();
3445 
3446   // Drop into default successor.
3447   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
3448                           MVT::Other, getControlRoot(),
3449                           DAG.getBasicBlock(Return)));
3450 }
3451 
3452 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
3453   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
3454 }
3455 
3456 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3457   assert(FuncInfo.MBB->isEHPad() &&
3458          "Call to landingpad not in landing pad!");
3459 
3460   // If there aren't registers to copy the values into (e.g., during SjLj
3461   // exceptions), then don't bother to create these DAG nodes.
3462   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3463   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3464   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3465       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3466     return;
3467 
3468   // If landingpad's return type is token type, we don't create DAG nodes
3469   // for its exception pointer and selector value. The extraction of exception
3470   // pointer or selector value from token type landingpads is not currently
3471   // supported.
3472   if (LP.getType()->isTokenTy())
3473     return;
3474 
3475   SmallVector<EVT, 2> ValueVTs;
3476   SDLoc dl = getCurSDLoc();
3477   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3478   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3479 
3480   // Get the two live-in registers as SDValues. The physregs have already been
3481   // copied into virtual registers.
3482   SDValue Ops[2];
3483   if (FuncInfo.ExceptionPointerVirtReg) {
3484     Ops[0] = DAG.getZExtOrTrunc(
3485         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3486                            FuncInfo.ExceptionPointerVirtReg,
3487                            TLI.getPointerTy(DAG.getDataLayout())),
3488         dl, ValueVTs[0]);
3489   } else {
3490     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3491   }
3492   Ops[1] = DAG.getZExtOrTrunc(
3493       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3494                          FuncInfo.ExceptionSelectorVirtReg,
3495                          TLI.getPointerTy(DAG.getDataLayout())),
3496       dl, ValueVTs[1]);
3497 
3498   // Merge into one.
3499   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3500                             DAG.getVTList(ValueVTs), Ops);
3501   setValue(&LP, Res);
3502 }
3503 
3504 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
3505                                            MachineBasicBlock *Last) {
3506   // Update JTCases.
3507   for (JumpTableBlock &JTB : SL->JTCases)
3508     if (JTB.first.HeaderBB == First)
3509       JTB.first.HeaderBB = Last;
3510 
3511   // Update BitTestCases.
3512   for (BitTestBlock &BTB : SL->BitTestCases)
3513     if (BTB.Parent == First)
3514       BTB.Parent = Last;
3515 }
3516 
3517 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3518   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3519 
3520   // Update machine-CFG edges with unique successors.
3521   SmallSet<BasicBlock*, 32> Done;
3522   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3523     BasicBlock *BB = I.getSuccessor(i);
3524     bool Inserted = Done.insert(BB).second;
3525     if (!Inserted)
3526       continue;
3527 
3528     MachineBasicBlock *Succ = FuncInfo.getMBB(BB);
3529     addSuccessorWithProb(IndirectBrMBB, Succ);
3530   }
3531   IndirectBrMBB->normalizeSuccProbs();
3532 
3533   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
3534                           MVT::Other, getControlRoot(),
3535                           getValue(I.getAddress())));
3536 }
3537 
3538 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3539   if (!DAG.getTarget().Options.TrapUnreachable)
3540     return;
3541 
3542   // We may be able to ignore an unreachable behind a noreturn call.
3543   if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode());
3544       Call && Call->doesNotReturn()) {
3545     if (DAG.getTarget().Options.NoTrapAfterNoreturn)
3546       return;
3547     // Do not emit an additional trap instruction.
3548     if (Call->isNonContinuableTrap())
3549       return;
3550   }
3551 
3552   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3553 }
3554 
3555 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3556   SDNodeFlags Flags;
3557   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3558     Flags.copyFMF(*FPOp);
3559 
3560   SDValue Op = getValue(I.getOperand(0));
3561   SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3562                                     Op, Flags);
3563   setValue(&I, UnNodeValue);
3564 }
3565 
3566 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3567   SDNodeFlags Flags;
3568   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3569     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3570     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3571   }
3572   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3573     Flags.setExact(ExactOp->isExact());
3574   if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
3575     Flags.setDisjoint(DisjointOp->isDisjoint());
3576   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3577     Flags.copyFMF(*FPOp);
3578 
3579   SDValue Op1 = getValue(I.getOperand(0));
3580   SDValue Op2 = getValue(I.getOperand(1));
3581   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3582                                      Op1, Op2, Flags);
3583   setValue(&I, BinNodeValue);
3584 }
3585 
3586 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3587   SDValue Op1 = getValue(I.getOperand(0));
3588   SDValue Op2 = getValue(I.getOperand(1));
3589 
3590   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3591       Op1.getValueType(), DAG.getDataLayout());
3592 
3593   // Coerce the shift amount to the right type if we can. This exposes the
3594   // truncate or zext to optimization early.
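       // E.g., for an i32 shift on a target whose shift-amount type is i8
       // (illustrative), the i32 amount operand is truncated here instead of
       // during legalization.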
3595   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3596     assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) &&
3597            "Unexpected shift type");
3598     Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
3599   }
3600 
3601   bool nuw = false;
3602   bool nsw = false;
3603   bool exact = false;
3604 
3605   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3606 
3607     if (const OverflowingBinaryOperator *OFBinOp =
3608             dyn_cast<const OverflowingBinaryOperator>(&I)) {
3609       nuw = OFBinOp->hasNoUnsignedWrap();
3610       nsw = OFBinOp->hasNoSignedWrap();
3611     }
3612     if (const PossiblyExactOperator *ExactOp =
3613             dyn_cast<const PossiblyExactOperator>(&I))
3614       exact = ExactOp->isExact();
3615   }
3616   SDNodeFlags Flags;
3617   Flags.setExact(exact);
3618   Flags.setNoSignedWrap(nsw);
3619   Flags.setNoUnsignedWrap(nuw);
3620   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3621                             Flags);
3622   setValue(&I, Res);
3623 }
3624 
3625 void SelectionDAGBuilder::visitSDiv(const User &I) {
3626   SDValue Op1 = getValue(I.getOperand(0));
3627   SDValue Op2 = getValue(I.getOperand(1));
3628 
3629   SDNodeFlags Flags;
3630   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3631                  cast<PossiblyExactOperator>(&I)->isExact());
3632   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3633                            Op2, Flags));
3634 }
3635 
3636 void SelectionDAGBuilder::visitICmp(const ICmpInst &I) {
3637   ICmpInst::Predicate predicate = I.getPredicate();
3638   SDValue Op1 = getValue(I.getOperand(0));
3639   SDValue Op2 = getValue(I.getOperand(1));
3640   ISD::CondCode Opcode = getICmpCondCode(predicate);
3641 
3642   auto &TLI = DAG.getTargetLoweringInfo();
3643   EVT MemVT =
3644       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3645 
3646   // If a pointer's DAG type is larger than its memory type then the DAG values
3647   // are zero-extended. This breaks signed comparisons so truncate back to the
3648   // underlying type before doing the compare.
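       // (E.g., a target with 32-bit pointers in memory that are zero-extended
       // to 64 bits in the DAG would otherwise perform a signed compare on the
       // extended values.)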
3649   if (Op1.getValueType() != MemVT) {
3650     Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3651     Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3652   }
3653 
3654   SDNodeFlags Flags;
3655   Flags.setSameSign(I.hasSameSign());
3656   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3657 
3658   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3659                                                         I.getType());
3660   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3661 }
3662 
3663 void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
3664   FCmpInst::Predicate predicate = I.getPredicate();
3665   SDValue Op1 = getValue(I.getOperand(0));
3666   SDValue Op2 = getValue(I.getOperand(1));
3667 
3668   ISD::CondCode Condition = getFCmpCondCode(predicate);
3669   auto *FPMO = cast<FPMathOperator>(&I);
3670   if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3671     Condition = getFCmpCodeWithoutNaN(Condition);
3672 
3673   SDNodeFlags Flags;
3674   Flags.copyFMF(*FPMO);
3675   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3676 
3677   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3678                                                         I.getType());
3679   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3680 }
3681 
3682 // Check if all users of the select's condition are selects, i.e. the
3683 // condition is not used by any other kind of instruction.
3684 static bool hasOnlySelectUsers(const Value *Cond) {
3685   return llvm::all_of(Cond->users(), [](const Value *V) {
3686     return isa<SelectInst>(V);
3687   });
3688 }
3689 
3690 void SelectionDAGBuilder::visitSelect(const User &I) {
3691   SmallVector<EVT, 4> ValueVTs;
3692   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3693                   ValueVTs);
3694   unsigned NumValues = ValueVTs.size();
3695   if (NumValues == 0) return;
3696 
3697   SmallVector<SDValue, 4> Values(NumValues);
3698   SDValue Cond     = getValue(I.getOperand(0));
3699   SDValue LHSVal   = getValue(I.getOperand(1));
3700   SDValue RHSVal   = getValue(I.getOperand(2));
3701   SmallVector<SDValue, 1> BaseOps(1, Cond);
3702   ISD::NodeType OpCode =
3703       Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3704 
3705   bool IsUnaryAbs = false;
3706   bool Negate = false;
3707 
3708   SDNodeFlags Flags;
3709   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3710     Flags.copyFMF(*FPOp);
3711 
3712   Flags.setUnpredictable(
3713       cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));
3714 
3715   // Min/max matching is only viable if all output VTs are the same.
3716   if (all_equal(ValueVTs)) {
3717     EVT VT = ValueVTs[0];
3718     LLVMContext &Ctx = *DAG.getContext();
3719     auto &TLI = DAG.getTargetLoweringInfo();
3720 
3721     // We care about the legality of the operation after it has been type
3722     // legalized.
3723     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3724       VT = TLI.getTypeToTransformTo(Ctx, VT);
3725 
3726     // If the vselect is legal, assume we want to leave this as a vector setcc +
3727     // vselect. Otherwise, if this is going to be scalarized, we want to see if
3728     // min/max is legal on the scalar type.
3729     bool UseScalarMinMax = VT.isVector() &&
3730       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3731 
3732     // ValueTracking's select pattern matching does not account for -0.0,
3733     // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that
3734     // -0.0 is less than +0.0.
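         // E.g., select (fcmp olt X, Y), X, Y with X = -0.0 and Y = +0.0
         // returns +0.0 (the operands compare equal), whereas FMINIMUM would
         // return -0.0.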
3735     const Value *LHS, *RHS;
3736     auto SPR = matchSelectPattern(&I, LHS, RHS);
3737     ISD::NodeType Opc = ISD::DELETED_NODE;
3738     switch (SPR.Flavor) {
3739     case SPF_UMAX:    Opc = ISD::UMAX; break;
3740     case SPF_UMIN:    Opc = ISD::UMIN; break;
3741     case SPF_SMAX:    Opc = ISD::SMAX; break;
3742     case SPF_SMIN:    Opc = ISD::SMIN; break;
3743     case SPF_FMINNUM:
3744       switch (SPR.NaNBehavior) {
3745       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3746       case SPNB_RETURNS_NAN: break;
3747       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3748       case SPNB_RETURNS_ANY:
3749         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT) ||
3750             (UseScalarMinMax &&
3751              TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType())))
3752           Opc = ISD::FMINNUM;
3753         break;
3754       }
3755       break;
3756     case SPF_FMAXNUM:
3757       switch (SPR.NaNBehavior) {
3758       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3759       case SPNB_RETURNS_NAN: break;
3760       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3761       case SPNB_RETURNS_ANY:
3762         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT) ||
3763             (UseScalarMinMax &&
3764              TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType())))
3765           Opc = ISD::FMAXNUM;
3766         break;
3767       }
3768       break;
3769     case SPF_NABS:
3770       Negate = true;
3771       [[fallthrough]];
3772     case SPF_ABS:
3773       IsUnaryAbs = true;
3774       Opc = ISD::ABS;
3775       break;
3776     default: break;
3777     }
3778 
3779     if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3780         (TLI.isOperationLegalOrCustom(Opc, VT) ||
3781          (UseScalarMinMax &&
3782           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3783         // If the underlying comparison instruction is used by any other
3784         // instruction, the consumed instructions won't be destroyed, so it is
3785         // not profitable to convert to a min/max.
3786         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3787       OpCode = Opc;
3788       LHSVal = getValue(LHS);
3789       RHSVal = getValue(RHS);
3790       BaseOps.clear();
3791     }
3792 
3793     if (IsUnaryAbs) {
3794       OpCode = Opc;
3795       LHSVal = getValue(LHS);
3796       BaseOps.clear();
3797     }
3798   }
3799 
3800   if (IsUnaryAbs) {
3801     for (unsigned i = 0; i != NumValues; ++i) {
3802       SDLoc dl = getCurSDLoc();
3803       EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3804       Values[i] =
3805           DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3806       if (Negate)
3807         Values[i] = DAG.getNegative(Values[i], dl, VT);
3808     }
3809   } else {
3810     for (unsigned i = 0; i != NumValues; ++i) {
3811       SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3812       Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3813       Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3814       Values[i] = DAG.getNode(
3815           OpCode, getCurSDLoc(),
3816           LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3817     }
3818   }
3819 
3820   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3821                            DAG.getVTList(ValueVTs), Values));
3822 }
3823 
3824 void SelectionDAGBuilder::visitTrunc(const User &I) {
3825   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3826   SDValue N = getValue(I.getOperand(0));
3827   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3828                                                         I.getType());
3829   SDNodeFlags Flags;
3830   if (auto *Trunc = dyn_cast<TruncInst>(&I)) {
3831     Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3832     Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3833   }
3834 
3835   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N, Flags));
3836 }
3837 
3838 void SelectionDAGBuilder::visitZExt(const User &I) {
3839   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3840   // ZExt also can't be a cast to bool for the same reason; nothing much to do.
3841   SDValue N = getValue(I.getOperand(0));
3842   auto &TLI = DAG.getTargetLoweringInfo();
3843   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3844 
3845   SDNodeFlags Flags;
3846   if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3847     Flags.setNonNeg(PNI->hasNonNeg());
3848 
3849   // Eagerly use nonneg information to canonicalize towards sign_extend if
3850   // that is the target's preference.
3851   // TODO: Let the target do this later.
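       // For example (illustrative, target-dependent): for
       //   %e = zext nneg i32 %x to i64
       // a target that reports sign extension as cheaper here emits
       // ISD::SIGN_EXTEND instead of ISD::ZERO_EXTEND.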
3852   if (Flags.hasNonNeg() &&
3853       TLI.isSExtCheaperThanZExt(N.getValueType(), DestVT)) {
3854     setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3855     return;
3856   }
3857 
3858   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N, Flags));
3859 }
3860 
3861 void SelectionDAGBuilder::visitSExt(const User &I) {
3862   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3863   // SExt also can't be a cast to bool for the same reason; nothing much to do.
3864   SDValue N = getValue(I.getOperand(0));
3865   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3866                                                         I.getType());
3867   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3868 }
3869 
3870 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3871   // FPTrunc is never a no-op cast, no need to check
3872   SDValue N = getValue(I.getOperand(0));
3873   SDLoc dl = getCurSDLoc();
3874   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3875   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3876   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3877                            DAG.getTargetConstant(
3878                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3879 }
3880 
3881 void SelectionDAGBuilder::visitFPExt(const User &I) {
3882   // FPExt is never a no-op cast, no need to check
3883   SDValue N = getValue(I.getOperand(0));
3884   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3885                                                         I.getType());
3886   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3887 }
3888 
3889 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3890   // FPToUI is never a no-op cast, no need to check
3891   SDValue N = getValue(I.getOperand(0));
3892   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3893                                                         I.getType());
3894   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3895 }
3896 
3897 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3898   // FPToSI is never a no-op cast, no need to check
3899   SDValue N = getValue(I.getOperand(0));
3900   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3901                                                         I.getType());
3902   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3903 }
3904 
3905 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3906   // UIToFP is never a no-op cast, no need to check
3907   SDValue N = getValue(I.getOperand(0));
3908   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3909                                                         I.getType());
3910   SDNodeFlags Flags;
3911   if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3912     Flags.setNonNeg(PNI->hasNonNeg());
3913 
3914   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N, Flags));
3915 }
3916 
3917 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3918   // SIToFP is never a no-op cast, no need to check
3919   SDValue N = getValue(I.getOperand(0));
3920   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3921                                                         I.getType());
3922   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3923 }
3924 
3925 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3926   // What to do depends on the size of the integer and the size of the pointer.
3927   // We can either truncate, zero extend, or no-op, accordingly.
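       // For example (sizes illustrative): with 64-bit pointers, ptrtoint to
       // i32 truncates, ptrtoint to i128 zero extends, and ptrtoint to i64 is
       // a no-op.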
3928   SDValue N = getValue(I.getOperand(0));
3929   auto &TLI = DAG.getTargetLoweringInfo();
3930   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3931                                                         I.getType());
3932   EVT PtrMemVT =
3933       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3934   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3935   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3936   setValue(&I, N);
3937 }
3938 
3939 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3940   // What to do depends on the size of the integer and the size of the pointer.
3941   // We can either truncate, zero extend, or no-op, accordingly.
3942   SDValue N = getValue(I.getOperand(0));
3943   auto &TLI = DAG.getTargetLoweringInfo();
3944   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3945   EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3946   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3947   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3948   setValue(&I, N);
3949 }
3950 
3951 void SelectionDAGBuilder::visitBitCast(const User &I) {
3952   SDValue N = getValue(I.getOperand(0));
3953   SDLoc dl = getCurSDLoc();
3954   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3955                                                         I.getType());
3956 
3957   // BitCast assures us that source and destination are the same size so this is
3958   // either a BITCAST or a no-op.
3959   if (DestVT != N.getValueType())
3960     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3961                              DestVT, N)); // convert types.
3962   // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3963   // might fold any kind of constant expression to an integer constant and that
3964   // is not what we are looking for. Only recognize a bitcast of a genuine
3965   // constant integer as an opaque constant.
3966   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3967     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3968                                  /*isOpaque=*/true));
3969   else
3970     setValue(&I, N);            // noop cast.
3971 }
3972 
3973 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3974   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3975   const Value *SV = I.getOperand(0);
3976   SDValue N = getValue(SV);
3977   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3978 
3979   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3980   unsigned DestAS = I.getType()->getPointerAddressSpace();
3981 
3982   if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
3983     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3984 
3985   setValue(&I, N);
3986 }
3987 
3988 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3989   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3990   SDValue InVec = getValue(I.getOperand(0));
3991   SDValue InVal = getValue(I.getOperand(1));
3992   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3993                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3994   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3995                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3996                            InVec, InVal, InIdx));
3997 }
3998 
3999 void SelectionDAGBuilder::visitExtractElement(const User &I) {
4000   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4001   SDValue InVec = getValue(I.getOperand(0));
4002   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
4003                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
4004   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
4005                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
4006                            InVec, InIdx));
4007 }
4008 
4009 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
4010   SDValue Src1 = getValue(I.getOperand(0));
4011   SDValue Src2 = getValue(I.getOperand(1));
4012   ArrayRef<int> Mask;
4013   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
4014     Mask = SVI->getShuffleMask();
4015   else
4016     Mask = cast<ConstantExpr>(I).getShuffleMask();
4017   SDLoc DL = getCurSDLoc();
4018   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4019   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4020   EVT SrcVT = Src1.getValueType();
4021 
4022   if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
4023       VT.isScalableVector()) {
4024     // Canonical splat form of first element of first input vector.
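         // For example (illustrative): a shufflevector of <vscale x 4 x i32>
         // with a zeroinitializer mask becomes a SPLAT_VECTOR of element 0 of
         // Src1.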
4025     SDValue FirstElt =
4026         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
4027                     DAG.getVectorIdxConstant(0, DL));
4028     setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
4029     return;
4030   }
4031 
4032   // For now, we only handle splats for scalable vectors.
4033   // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
4034   // for targets that support a SPLAT_VECTOR for non-scalable vector types.
4035   assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
4036 
4037   unsigned SrcNumElts = SrcVT.getVectorNumElements();
4038   unsigned MaskNumElts = Mask.size();
4039 
4040   if (SrcNumElts == MaskNumElts) {
4041     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
4042     return;
4043   }
4044 
4045   // Normalize the shuffle vector since mask and vector length don't match.
4046   if (SrcNumElts < MaskNumElts) {
4047     // Mask is longer than the source vectors. We can concatenate vectors to
4048     // make the source vector and mask lengths match.
4049 
4050     if (MaskNumElts % SrcNumElts == 0) {
4051       // Mask length is a multiple of the source vector length.
4052       // Check if the shuffle is some kind of concatenation of the input
4053       // vectors.
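           // For example (illustrative): with <4 x i32> sources, the mask
           // <0,1,2,3,4,5,6,7> gives ConcatSrcs = {0, 1}, i.e.
           // concat_vectors(Src1, Src2).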
4054       unsigned NumConcat = MaskNumElts / SrcNumElts;
4055       bool IsConcat = true;
4056       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4057       for (unsigned i = 0; i != MaskNumElts; ++i) {
4058         int Idx = Mask[i];
4059         if (Idx < 0)
4060           continue;
4061         // Ensure the indices in each SrcVT sized piece are sequential and that
4062         // the same source is used for the whole piece.
4063         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4064             (ConcatSrcs[i / SrcNumElts] >= 0 &&
4065              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
4066           IsConcat = false;
4067           break;
4068         }
4069         // Remember which source this index came from.
4070         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4071       }
4072 
4073       // The shuffle is concatenating multiple vectors together. Just emit
4074       // a CONCAT_VECTORS operation.
4075       if (IsConcat) {
4076         SmallVector<SDValue, 8> ConcatOps;
4077         for (auto Src : ConcatSrcs) {
4078           if (Src < 0)
4079             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
4080           else if (Src == 0)
4081             ConcatOps.push_back(Src1);
4082           else
4083             ConcatOps.push_back(Src2);
4084         }
4085         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
4086         return;
4087       }
4088     }
4089 
4090     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
4091     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4092     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
4093                                     PaddedMaskNumElts);
4094 
4095     // Pad both vectors with undefs to make them the same length as the mask.
4096     SDValue UndefVal = DAG.getUNDEF(SrcVT);
4097 
4098     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
4099     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
4100     MOps1[0] = Src1;
4101     MOps2[0] = Src2;
4102 
4103     Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
4104     Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
4105 
4106     // Readjust mask for new input vector length.
4107     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4108     for (unsigned i = 0; i != MaskNumElts; ++i) {
4109       int Idx = Mask[i];
4110       if (Idx >= (int)SrcNumElts)
4111         Idx -= SrcNumElts - PaddedMaskNumElts;
4112       MappedOps[i] = Idx;
4113     }
4114 
4115     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
4116 
4117     // If the concatenated vector was padded, extract a subvector with the
4118     // correct number of elements.
4119     if (MaskNumElts != PaddedMaskNumElts)
4120       Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
4121                            DAG.getVectorIdxConstant(0, DL));
4122 
4123     setValue(&I, Result);
4124     return;
4125   }
4126 
4127   assert(SrcNumElts > MaskNumElts);
4128 
4129   // Analyze the access pattern of the vector to see if we can extract
4130   // two subvectors and do the shuffle.
4131   int StartIdx[2] = {-1, -1}; // StartIdx to extract from
4132   bool CanExtract = true;
4133   for (int Idx : Mask) {
4134     unsigned Input = 0;
4135     if (Idx < 0)
4136       continue;
4137 
4138     if (Idx >= (int)SrcNumElts) {
4139       Input = 1;
4140       Idx -= SrcNumElts;
4141     }
4142 
4143     // If all the indices come from the same MaskNumElts-sized portion of
4144     // the sources, we can use extract. Also make sure the extract wouldn't
4145     // read past the end of the source.
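         // For example (illustrative): with <8 x i32> sources and the
         // 4-element mask <4,5,6,7>, StartIdx[0] becomes 4 and the shuffle is
         // done on extract_subvector(Src1, 4) with remapped mask <0,1,2,3>.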
4146     int NewStartIdx = alignDown(Idx, MaskNumElts);
4147     if (NewStartIdx + MaskNumElts > SrcNumElts ||
4148         (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4149       CanExtract = false;
4150     // Make sure we always update StartIdx as we use it to track if all
4151     // elements are undef.
4152     StartIdx[Input] = NewStartIdx;
4153   }
4154 
4155   if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4156     setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
4157     return;
4158   }
4159   if (CanExtract) {
4160     // Extract appropriate subvector and generate a vector shuffle
4161     for (unsigned Input = 0; Input < 2; ++Input) {
4162       SDValue &Src = Input == 0 ? Src1 : Src2;
4163       if (StartIdx[Input] < 0)
4164         Src = DAG.getUNDEF(VT);
4165       else {
4166         Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
4167                           DAG.getVectorIdxConstant(StartIdx[Input], DL));
4168       }
4169     }
4170 
4171     // Calculate new mask.
4172     SmallVector<int, 8> MappedOps(Mask);
4173     for (int &Idx : MappedOps) {
4174       if (Idx >= (int)SrcNumElts)
4175         Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4176       else if (Idx >= 0)
4177         Idx -= StartIdx[0];
4178     }
4179 
4180     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
4181     return;
4182   }
4183 
4184   // We can't use either concat vectors or extract subvectors, so fall back
4185   // to replacing the shuffle with extracts and a build vector.
4187   EVT EltVT = VT.getVectorElementType();
4188   SmallVector<SDValue,8> Ops;
4189   for (int Idx : Mask) {
4190     SDValue Res;
4191 
4192     if (Idx < 0) {
4193       Res = DAG.getUNDEF(EltVT);
4194     } else {
4195       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4196       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
4197 
4198       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
4199                         DAG.getVectorIdxConstant(Idx, DL));
4200     }
4201 
4202     Ops.push_back(Res);
4203   }
4204 
4205   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
4206 }
4207 
4208 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
4209   ArrayRef<unsigned> Indices = I.getIndices();
4210   const Value *Op0 = I.getOperand(0);
4211   const Value *Op1 = I.getOperand(1);
4212   Type *AggTy = I.getType();
4213   Type *ValTy = Op1->getType();
4214   bool IntoUndef = isa<UndefValue>(Op0);
4215   bool FromUndef = isa<UndefValue>(Op1);
4216 
4217   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
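       // For example (illustrative): for AggTy = {i32, {float, i64}} and
       // Indices = {1, 1}, the flattened leaf order is i32, float, i64, so
       // LinearIndex is 2.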
4218 
4219   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4220   SmallVector<EVT, 4> AggValueVTs;
4221   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
4222   SmallVector<EVT, 4> ValValueVTs;
4223   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4224 
4225   unsigned NumAggValues = AggValueVTs.size();
4226   unsigned NumValValues = ValValueVTs.size();
4227   SmallVector<SDValue, 4> Values(NumAggValues);
4228 
4229   // Ignore an insertvalue that produces an empty object
4230   if (!NumAggValues) {
4231     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4232     return;
4233   }
4234 
4235   SDValue Agg = getValue(Op0);
4236   unsigned i = 0;
4237   // Copy the beginning value(s) from the original aggregate.
4238   for (; i != LinearIndex; ++i)
4239     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4240                 SDValue(Agg.getNode(), Agg.getResNo() + i);
4241   // Copy values from the inserted value(s).
4242   if (NumValValues) {
4243     SDValue Val = getValue(Op1);
4244     for (; i != LinearIndex + NumValValues; ++i)
4245       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4246                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
4247   }
4248   // Copy remaining value(s) from the original aggregate.
4249   for (; i != NumAggValues; ++i)
4250     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4251                 SDValue(Agg.getNode(), Agg.getResNo() + i);
4252 
4253   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4254                            DAG.getVTList(AggValueVTs), Values));
4255 }
4256 
4257 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
4258   ArrayRef<unsigned> Indices = I.getIndices();
4259   const Value *Op0 = I.getOperand(0);
4260   Type *AggTy = Op0->getType();
4261   Type *ValTy = I.getType();
4262   bool OutOfUndef = isa<UndefValue>(Op0);
4263 
4264   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4265 
4266   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4267   SmallVector<EVT, 4> ValValueVTs;
4268   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4269 
4270   unsigned NumValValues = ValValueVTs.size();
4271 
4272   // Ignore an extractvalue that produces an empty object
4273   if (!NumValValues) {
4274     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4275     return;
4276   }
4277 
4278   SmallVector<SDValue, 4> Values(NumValValues);
4279 
4280   SDValue Agg = getValue(Op0);
4281   // Copy out the selected value(s).
4282   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4283     Values[i - LinearIndex] =
4284       OutOfUndef ?
4285         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
4286         SDValue(Agg.getNode(), Agg.getResNo() + i);
4287 
4288   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4289                            DAG.getVTList(ValValueVTs), Values));
4290 }
4291 
4292 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
4293   Value *Op0 = I.getOperand(0);
4294   // Note that the pointer operand may be a vector of pointers. Take the scalar
4295   // element which holds a pointer.
4296   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
4297   SDValue N = getValue(Op0);
4298   SDLoc dl = getCurSDLoc();
4299   auto &TLI = DAG.getTargetLoweringInfo();
4300   GEPNoWrapFlags NW = cast<GEPOperator>(I).getNoWrapFlags();
4301 
4302   // Normalize a vector GEP: all scalar operands should be converted to
4303   // splat vectors.
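       // For example (illustrative):
       //   %gep = getelementptr i32, ptr %p, <4 x i64> %offs
       // first splats %p into a <4 x ptr> value before the index arithmetic
       // below.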
4304   bool IsVectorGEP = I.getType()->isVectorTy();
4305   ElementCount VectorElementCount =
4306       IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
4307                   : ElementCount::getFixed(0);
4308 
4309   if (IsVectorGEP && !N.getValueType().isVector()) {
4310     LLVMContext &Context = *DAG.getContext();
4311     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
4312     N = DAG.getSplat(VT, dl, N);
4313   }
4314 
4315   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
4316        GTI != E; ++GTI) {
4317     const Value *Idx = GTI.getOperand();
4318     if (StructType *StTy = GTI.getStructTypeOrNull()) {
4319       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
4320       if (Field) {
4321         // N = N + Offset
4322         uint64_t Offset =
4323             DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
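             // For example (illustrative): for %t = type { i32, i64 } under a
             // typical 64-bit layout, Field == 1 yields Offset == 8.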
4324 
4325         // In an inbounds GEP with an offset that is nonnegative even when
4326         // interpreted as signed, assume there is no unsigned overflow.
4327         SDNodeFlags Flags;
4328         if (NW.hasNoUnsignedWrap() ||
4329             (int64_t(Offset) >= 0 && NW.hasNoUnsignedSignedWrap()))
4330           Flags |= SDNodeFlags::NoUnsignedWrap;
4331 
4332         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
4333                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
4334       }
4335     } else {
4336       // IdxSize is the width of the arithmetic according to IR semantics.
4337       // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4338       // (and fix up the result later).
4339       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4340       MVT IdxTy = MVT::getIntegerVT(IdxSize);
4341       TypeSize ElementSize =
4342           GTI.getSequentialElementStride(DAG.getDataLayout());
4343       // We intentionally mask away the high bits here; ElementSize may not
4344       // fit in IdxTy.
4345       APInt ElementMul(IdxSize, ElementSize.getKnownMinValue(),
4346                        /*isSigned=*/false, /*implicitTrunc=*/true);
4347       bool ElementScalable = ElementSize.isScalable();
4348 
4349       // If this is a scalar constant or a splat vector of constants,
4350       // handle it quickly.
4351       const auto *C = dyn_cast<Constant>(Idx);
4352       if (C && isa<VectorType>(C->getType()))
4353         C = C->getSplatValue();
4354 
4355       const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4356       if (CI && CI->isZero())
4357         continue;
4358       if (CI && !ElementScalable) {
4359         APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4360         LLVMContext &Context = *DAG.getContext();
4361         SDValue OffsVal;
4362         if (IsVectorGEP)
4363           OffsVal = DAG.getConstant(
4364               Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4365         else
4366           OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4367 
4368         // In an inbounds GEP with an offset that is nonnegative even when
4369         // interpreted as signed, assume there is no unsigned overflow.
4370         SDNodeFlags Flags;
4371         if (NW.hasNoUnsignedWrap() ||
4372             (Offs.isNonNegative() && NW.hasNoUnsignedSignedWrap()))
4373           Flags.setNoUnsignedWrap(true);
4374 
4375         OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4376 
4377         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
4378         continue;
4379       }
4380 
4381       // N = N + Idx * ElementMul;
4382       SDValue IdxN = getValue(Idx);
4383 
4384       if (!IdxN.getValueType().isVector() && IsVectorGEP) {
4385         EVT VT = EVT::getVectorVT(*DAG.getContext(), IdxN.getValueType(),
4386                                   VectorElementCount);
4387         IdxN = DAG.getSplat(VT, dl, IdxN);
4388       }
4389 
4390       // If the index is smaller or larger than intptr_t, truncate or extend
4391       // it.
4392       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4393 
4394       SDNodeFlags ScaleFlags;
4395       // The multiplication of an index by the type size does not wrap the
4396       // pointer index type in a signed sense (mul nsw).
4397       ScaleFlags.setNoSignedWrap(NW.hasNoUnsignedSignedWrap());
4398 
4399       // The multiplication of an index by the type size does not wrap the
4400       // pointer index type in an unsigned sense (mul nuw).
4401       ScaleFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap());
4402 
4403       if (ElementScalable) {
4404         EVT VScaleTy = N.getValueType().getScalarType();
4405         SDValue VScale = DAG.getNode(
4406             ISD::VSCALE, dl, VScaleTy,
4407             DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4408         if (IsVectorGEP)
4409           VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4410         IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale,
4411                            ScaleFlags);
4412       } else {
4413         // If this is a multiply by a power of two, turn it into a shl
4414         // immediately.  This is a very common case.
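             // For example (illustrative): indexing an array of i32 gives
             // ElementMul == 4, so IdxN is shifted left by 2 instead of being
             // multiplied.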
4415         if (ElementMul != 1) {
4416           if (ElementMul.isPowerOf2()) {
4417             unsigned Amt = ElementMul.logBase2();
4418             IdxN = DAG.getNode(ISD::SHL, dl, N.getValueType(), IdxN,
4419                                DAG.getConstant(Amt, dl, IdxN.getValueType()),
4420                                ScaleFlags);
4421           } else {
4422             SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
4423                                             IdxN.getValueType());
4424             IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, Scale,
4425                                ScaleFlags);
4426           }
4427         }
4428       }
4429 
4430       // The successive addition of the current address, truncated to the
4431       // pointer index type and interpreted as an unsigned number, and each
4432       // offset, also interpreted as an unsigned number, does not wrap the
4433       // pointer index type (add nuw).
4434       SDNodeFlags AddFlags;
4435       AddFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap());
4436 
4437       N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, IdxN, AddFlags);
4438     }
4439   }
4440 
4441   MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
4442   MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
4443   if (IsVectorGEP) {
4444     PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
4445     PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
4446   }
4447 
4448   if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4449     N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4450 
4451   setValue(&I, N);
4452 }
4453 
4454 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4455   // If this is a fixed sized alloca in the entry block of the function,
4456   // allocate it statically on the stack.
4457   if (FuncInfo.StaticAllocaMap.count(&I))
4458     return;   // getValue will auto-populate this.
4459 
4460   SDLoc dl = getCurSDLoc();
4461   Type *Ty = I.getAllocatedType();
4462   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4463   auto &DL = DAG.getDataLayout();
4464   TypeSize TySize = DL.getTypeAllocSize(Ty);
4465   MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
4466 
4467   SDValue AllocSize = getValue(I.getArraySize());
4468 
4469   EVT IntPtr = TLI.getPointerTy(DL, I.getAddressSpace());
4470   if (AllocSize.getValueType() != IntPtr)
4471     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4472 
4473   if (TySize.isScalable())
4474     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4475                             DAG.getVScale(dl, IntPtr,
4476                                           APInt(IntPtr.getScalarSizeInBits(),
4477                                                 TySize.getKnownMinValue())));
4478   else {
4479     SDValue TySizeValue =
4480         DAG.getConstant(TySize.getFixedValue(), dl, MVT::getIntegerVT(64));
4481     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4482                             DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4483   }
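       // For example (illustrative): for "alloca <vscale x 4 x i32>", the
       // scalable branch above computes AllocSize as ArraySize * vscale * 16.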
4484 
4485   // Handle alignment.  If the requested alignment is less than or equal to
4486   // the stack alignment, ignore it.  If it is greater than the stack
4487   // alignment, we note this in the DYNAMIC_STACKALLOC node.
4488   Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4489   if (*Alignment <= StackAlign)
4490     Alignment = std::nullopt;
4491 
4492   const uint64_t StackAlignMask = StackAlign.value() - 1U;
4493   // Round the size of the allocation up to the stack alignment size
4494   // by adding SA-1 to the size. This doesn't overflow because we're computing
4495   // an address inside an alloca.
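       // For example (illustrative): with a 16-byte stack alignment, a
       // 20-byte request becomes (20 + 15) & ~15 == 32.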
4496   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4497                           DAG.getConstant(StackAlignMask, dl, IntPtr),
4498                           SDNodeFlags::NoUnsignedWrap);
4499 
4500   // Mask out the low bits for alignment purposes.
4501   AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4502                           DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4503 
4504   SDValue Ops[] = {
4505       getRoot(), AllocSize,
4506       DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4507   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4508   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4509   setValue(&I, DSA);
4510   DAG.setRoot(DSA.getValue(1));
4511 
4512   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4513 }
4514 
4515 static const MDNode *getRangeMetadata(const Instruction &I) {
4516   // If !noundef is not present, then !range violation results in a poison
4517   // value rather than immediate undefined behavior. In theory, transferring
4518   // these annotations to SDAG is fine, but in practice there are key SDAG
4519   // transforms that are known not to be poison-safe, such as folding logical
4520   // and/or to bitwise and/or. For now, only transfer !range if !noundef is
4521   // also present.
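       // For example (illustrative IR), the !range here is transferred only
       // because !noundef is also present:
       //   %v = load i8, ptr %p, !range !0, !noundef !1
       //   !0 = !{i8 0, i8 2}
       //   !1 = !{}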
4522   if (!I.hasMetadata(LLVMContext::MD_noundef))
4523     return nullptr;
4524   return I.getMetadata(LLVMContext::MD_range);
4525 }
4526 
4527 static std::optional<ConstantRange> getRange(const Instruction &I) {
4528   if (const auto *CB = dyn_cast<CallBase>(&I)) {
4529     // see comment in getRangeMetadata about this check
4530     if (CB->hasRetAttr(Attribute::NoUndef))
4531       return CB->getRange();
4532   }
4533   if (const MDNode *Range = getRangeMetadata(I))
4534     return getConstantRangeFromMetadata(*Range);
4535   return std::nullopt;
4536 }
4537 
4538 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4539   if (I.isAtomic())
4540     return visitAtomicLoad(I);
4541 
4542   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4543   const Value *SV = I.getOperand(0);
4544   if (TLI.supportSwiftError()) {
4545     // Swifterror values can come from either a function parameter with
4546     // swifterror attribute or an alloca with swifterror attribute.
4547     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4548       if (Arg->hasSwiftErrorAttr())
4549         return visitLoadFromSwiftError(I);
4550     }
4551 
4552     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4553       if (Alloca->isSwiftError())
4554         return visitLoadFromSwiftError(I);
4555     }
4556   }
4557 
4558   SDValue Ptr = getValue(SV);
4559 
4560   Type *Ty = I.getType();
4561   SmallVector<EVT, 4> ValueVTs, MemVTs;
4562   SmallVector<TypeSize, 4> Offsets;
4563   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
4564   unsigned NumValues = ValueVTs.size();
4565   if (NumValues == 0)
4566     return;
4567 
4568   Align Alignment = I.getAlign();
4569   AAMDNodes AAInfo = I.getAAMetadata();
4570   const MDNode *Ranges = getRangeMetadata(I);
4571   bool isVolatile = I.isVolatile();
4572   MachineMemOperand::Flags MMOFlags =
4573       TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4574 
4575   SDValue Root;
4576   bool ConstantMemory = false;
4577   if (isVolatile)
4578     // Serialize volatile loads with other side effects.
4579     Root = getRoot();
4580   else if (NumValues > MaxParallelChains)
4581     Root = getMemoryRoot();
4582   else if (AA &&
4583            AA->pointsToConstantMemory(MemoryLocation(
4584                SV,
4585                LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4586                AAInfo))) {
4587     // Do not serialize (non-volatile) loads of constant memory with anything.
4588     Root = DAG.getEntryNode();
4589     ConstantMemory = true;
4590     MMOFlags |= MachineMemOperand::MOInvariant;
4591   } else {
4592     // Do not serialize non-volatile loads against each other.
4593     Root = DAG.getRoot();
4594   }
4595 
4596   SDLoc dl = getCurSDLoc();
4597 
4598   if (isVolatile)
4599     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4600 
4601   SmallVector<SDValue, 4> Values(NumValues);
4602   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4603 
4604   unsigned ChainI = 0;
4605   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4606     // Serializing loads here may result in excessive register pressure, and
4607     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4608     // could recover a bit by hoisting nodes upward in the chain by recognizing
4609     // they are side-effect free or do not alias. The optimizer should really
4610     // avoid this case by converting large object/array copies to llvm.memcpy
4611     // (MaxParallelChains should always remain as a failsafe).
4612     if (ChainI == MaxParallelChains) {
4613       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4614       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4615                                   ArrayRef(Chains.data(), ChainI));
4616       Root = Chain;
4617       ChainI = 0;
4618     }
4619 
4620     // TODO: MachinePointerInfo only supports a fixed length offset.
4621     MachinePointerInfo PtrInfo =
4622         !Offsets[i].isScalable() || Offsets[i].isZero()
4623             ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4624             : MachinePointerInfo();
4625 
4626     SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4627     SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment,
4628                             MMOFlags, AAInfo, Ranges);
4629     Chains[ChainI] = L.getValue(1);
4630 
4631     if (MemVTs[i] != ValueVTs[i])
4632       L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4633 
4634     Values[i] = L;
4635   }
4636 
4637   if (!ConstantMemory) {
4638     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4639                                 ArrayRef(Chains.data(), ChainI));
4640     if (isVolatile)
4641       DAG.setRoot(Chain);
4642     else
4643       PendingLoads.push_back(Chain);
4644   }
4645 
4646   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4647                            DAG.getVTList(ValueVTs), Values));
4648 }
4649 
4650 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4651   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4652          "call visitStoreToSwiftError when backend supports swifterror");
4653 
4654   SmallVector<EVT, 4> ValueVTs;
4655   SmallVector<uint64_t, 4> Offsets;
4656   const Value *SrcV = I.getOperand(0);
4657   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4658                   SrcV->getType(), ValueVTs, &Offsets, 0);
4659   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4660          "expect a single EVT for swifterror");
4661 
4662   SDValue Src = getValue(SrcV);
4663   // Create a virtual register, then update the virtual register.
4664   Register VReg =
4665       SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4666   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4667   // Chain can be getRoot or getControlRoot.
4668   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4669                                       SDValue(Src.getNode(), Src.getResNo()));
4670   DAG.setRoot(CopyNode);
4671 }
4672 
4673 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4674   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4675          "call visitLoadFromSwiftError when backend supports swifterror");
4676 
4677   assert(!I.isVolatile() &&
4678          !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4679          !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4680          "volatile, non-temporal and invariant loads are not supported for load_from_swift_error");
4681 
4682   const Value *SV = I.getOperand(0);
4683   Type *Ty = I.getType();
4684   assert(
4685       (!AA ||
4686        !AA->pointsToConstantMemory(MemoryLocation(
4687            SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4688            I.getAAMetadata()))) &&
4689       "load_from_swift_error should not be constant memory");
4690 
4691   SmallVector<EVT, 4> ValueVTs;
4692   SmallVector<uint64_t, 4> Offsets;
4693   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4694                   ValueVTs, &Offsets, 0);
4695   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4696          "expect a single EVT for swifterror");
4697 
4698   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4699   SDValue L = DAG.getCopyFromReg(
4700       getRoot(), getCurSDLoc(),
4701       SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4702 
4703   setValue(&I, L);
4704 }
4705 
4706 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4707   if (I.isAtomic())
4708     return visitAtomicStore(I);
4709 
4710   const Value *SrcV = I.getOperand(0);
4711   const Value *PtrV = I.getOperand(1);
4712 
4713   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4714   if (TLI.supportSwiftError()) {
4715     // Swifterror values can come from either a function parameter with
4716     // swifterror attribute or an alloca with swifterror attribute.
4717     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4718       if (Arg->hasSwiftErrorAttr())
4719         return visitStoreToSwiftError(I);
4720     }
4721 
4722     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4723       if (Alloca->isSwiftError())
4724         return visitStoreToSwiftError(I);
4725     }
4726   }
4727 
4728   SmallVector<EVT, 4> ValueVTs, MemVTs;
4729   SmallVector<TypeSize, 4> Offsets;
4730   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4731                   SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4732   unsigned NumValues = ValueVTs.size();
4733   if (NumValues == 0)
4734     return;
4735 
4736   // Get the lowered operands. Note that we do this after
4737   // checking if NumValues is zero, because with zero values
4738   // the operands won't have values in the map.
4739   SDValue Src = getValue(SrcV);
4740   SDValue Ptr = getValue(PtrV);
4741 
4742   SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4743   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4744   SDLoc dl = getCurSDLoc();
4745   Align Alignment = I.getAlign();
4746   AAMDNodes AAInfo = I.getAAMetadata();
4747 
4748   auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4749 
4750   unsigned ChainI = 0;
4751   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4752     // See visitLoad comments.
4753     if (ChainI == MaxParallelChains) {
4754       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4755                                   ArrayRef(Chains.data(), ChainI));
4756       Root = Chain;
4757       ChainI = 0;
4758     }
4759 
4760     // TODO: MachinePointerInfo only supports a fixed length offset.
4761     MachinePointerInfo PtrInfo =
4762         !Offsets[i].isScalable() || Offsets[i].isZero()
4763             ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4764             : MachinePointerInfo();
4765 
4766     SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4767     SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4768     if (MemVTs[i] != ValueVTs[i])
4769       Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4770     SDValue St =
4771         DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4772     Chains[ChainI] = St;
4773   }
4774 
4775   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4776                                   ArrayRef(Chains.data(), ChainI));
4777   setValue(&I, StoreNode);
4778   DAG.setRoot(StoreNode);
4779 }
4780 
4781 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4782                                            bool IsCompressing) {
4783   SDLoc sdl = getCurSDLoc();
4784 
4785   auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4786                                Align &Alignment) {
4787     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4788     Src0 = I.getArgOperand(0);
4789     Ptr = I.getArgOperand(1);
4790     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getAlignValue();
4791     Mask = I.getArgOperand(3);
4792   };
4793   auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4794                                     Align &Alignment) {
4795     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4796     Src0 = I.getArgOperand(0);
4797     Ptr = I.getArgOperand(1);
4798     Mask = I.getArgOperand(2);
4799     Alignment = I.getParamAlign(1).valueOrOne();
4800   };
4801 
4802   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4803   Align Alignment;
4804   if (IsCompressing)
4805     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4806   else
4807     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4808 
4809   SDValue Ptr = getValue(PtrOperand);
4810   SDValue Src0 = getValue(Src0Operand);
4811   SDValue Mask = getValue(MaskOperand);
4812   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4813 
4814   EVT VT = Src0.getValueType();
4815 
4816   auto MMOFlags = MachineMemOperand::MOStore;
4817   if (I.hasMetadata(LLVMContext::MD_nontemporal))
4818     MMOFlags |= MachineMemOperand::MONonTemporal;
4819 
4820   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4821       MachinePointerInfo(PtrOperand), MMOFlags,
4822       LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata());
4823 
4824   const auto &TLI = DAG.getTargetLoweringInfo();
4825   const auto &TTI =
4826       TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
4827   SDValue StoreNode =
4828       !IsCompressing &&
4829               TTI.hasConditionalLoadStoreForType(I.getArgOperand(0)->getType())
4830           ? TLI.visitMaskedStore(DAG, sdl, getMemoryRoot(), MMO, Ptr, Src0,
4831                                  Mask)
4832           : DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask,
4833                                VT, MMO, ISD::UNINDEXED, /*Truncating=*/false,
4834                                IsCompressing);
4835   DAG.setRoot(StoreNode);
4836   setValue(&I, StoreNode);
4837 }
4838 
4839 // Get a uniform base for the Gather/Scatter intrinsic.
4840 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4841 // We try to represent it as a base pointer + vector of indices.
4842 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4843 // The first operand of the GEP may be a single pointer or a vector of pointers.
4844 // Example:
4845 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4846 //  or
4847 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
4848 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4849 //
4850 // When the first GEP operand is a single pointer, it is the uniform base we
4851 // are looking for. If the first operand of the GEP is a splat vector, we
4852 // extract the splat value and use it as a uniform base.
4853 // In all other cases the function returns 'false'.
4854 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4855                            ISD::MemIndexType &IndexType, SDValue &Scale,
4856                            SelectionDAGBuilder *SDB, const BasicBlock *CurBB,
4857                            uint64_t ElemSize) {
4858   SelectionDAG& DAG = SDB->DAG;
4859   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4860   const DataLayout &DL = DAG.getDataLayout();
4861 
4862   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4863 
4864   // Handle splat constant pointer.
4865   if (auto *C = dyn_cast<Constant>(Ptr)) {
4866     C = C->getSplatValue();
4867     if (!C)
4868       return false;
4869 
4870     Base = SDB->getValue(C);
4871 
4872     ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4873     EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4874     Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4875     IndexType = ISD::SIGNED_SCALED;
4876     Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4877     return true;
4878   }
4879 
4880   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4881   if (!GEP || GEP->getParent() != CurBB)
4882     return false;
4883 
4884   if (GEP->getNumOperands() != 2)
4885     return false;
4886 
4887   const Value *BasePtr = GEP->getPointerOperand();
4888   const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4889 
4890   // Make sure the base is scalar and the index is a vector.
4891   if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4892     return false;
4893 
4894   TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
4895   if (ScaleVal.isScalable())
4896     return false;
4897 
4898   // Target may not support the required addressing mode.
4899   if (ScaleVal != 1 &&
4900       !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize))
4901     return false;
4902 
4903   Base = SDB->getValue(BasePtr);
4904   Index = SDB->getValue(IndexVal);
4905   IndexType = ISD::SIGNED_SCALED;
4906 
4907   Scale =
4908       DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4909   return true;
4910 }
4911 
4912 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4913   SDLoc sdl = getCurSDLoc();
4914 
4915   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4916   const Value *Ptr = I.getArgOperand(1);
4917   SDValue Src0 = getValue(I.getArgOperand(0));
4918   SDValue Mask = getValue(I.getArgOperand(3));
4919   EVT VT = Src0.getValueType();
4920   Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4921                         ->getMaybeAlignValue()
4922                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4923   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4924 
4925   SDValue Base;
4926   SDValue Index;
4927   ISD::MemIndexType IndexType;
4928   SDValue Scale;
4929   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4930                                     I.getParent(), VT.getScalarStoreSize());
4931 
4932   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4933   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4934       MachinePointerInfo(AS), MachineMemOperand::MOStore,
4935       LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata());
4936   if (!UniformBase) {
4937     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4938     Index = getValue(Ptr);
4939     IndexType = ISD::SIGNED_SCALED;
4940     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4941   }
4942 
4943   EVT IdxVT = Index.getValueType();
4944   EVT EltTy = IdxVT.getVectorElementType();
4945   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4946     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4947     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4948   }
4949 
4950   SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4951   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4952                                          Ops, MMO, IndexType, false);
4953   DAG.setRoot(Scatter);
4954   setValue(&I, Scatter);
4955 }
4956 
4957 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4958   SDLoc sdl = getCurSDLoc();
4959 
4960   auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4961                               Align &Alignment) {
4962     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4963     Ptr = I.getArgOperand(0);
4964     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getAlignValue();
4965     Mask = I.getArgOperand(2);
4966     Src0 = I.getArgOperand(3);
4967   };
4968   auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4969                                  Align &Alignment) {
4970     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4971     Ptr = I.getArgOperand(0);
4972     Alignment = I.getParamAlign(0).valueOrOne();
4973     Mask = I.getArgOperand(1);
4974     Src0 = I.getArgOperand(2);
4975   };
4976 
4977   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4978   Align Alignment;
4979   if (IsExpanding)
4980     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4981   else
4982     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4983 
4984   SDValue Ptr = getValue(PtrOperand);
4985   SDValue Src0 = getValue(Src0Operand);
4986   SDValue Mask = getValue(MaskOperand);
4987   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4988 
4989   EVT VT = Src0.getValueType();
4990   AAMDNodes AAInfo = I.getAAMetadata();
4991   const MDNode *Ranges = getRangeMetadata(I);
4992 
4993   // Do not serialize masked loads of constant memory with anything.
4994   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
4995   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4996 
4997   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4998 
4999   auto MMOFlags = MachineMemOperand::MOLoad;
5000   if (I.hasMetadata(LLVMContext::MD_nontemporal))
5001     MMOFlags |= MachineMemOperand::MONonTemporal;
5002 
5003   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5004       MachinePointerInfo(PtrOperand), MMOFlags,
5005       LocationSize::beforeOrAfterPointer(), Alignment, AAInfo, Ranges);
5006 
5007   const auto &TLI = DAG.getTargetLoweringInfo();
5008   const auto &TTI =
5009       TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
5010   // Load and Res may end up referring to different nodes; both are output
5011   // variables of the lowering below.
5012   SDValue Load;
5013   SDValue Res;
5014   if (!IsExpanding &&
5015       TTI.hasConditionalLoadStoreForType(Src0Operand->getType()))
5016     Res = TLI.visitMaskedLoad(DAG, sdl, InChain, MMO, Load, Ptr, Src0, Mask);
5017   else
5018     Res = Load =
5019         DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
5020                           ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
5021   if (AddToChain)
5022     PendingLoads.push_back(Load.getValue(1));
5023   setValue(&I, Res);
5024 }
5025 
5026 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
5027   SDLoc sdl = getCurSDLoc();
5028 
5029   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
5030   const Value *Ptr = I.getArgOperand(0);
5031   SDValue Src0 = getValue(I.getArgOperand(3));
5032   SDValue Mask = getValue(I.getArgOperand(2));
5033 
5034   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5035   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5036   Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
5037                         ->getMaybeAlignValue()
5038                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
5039 
5040   const MDNode *Ranges = getRangeMetadata(I);
5041 
5042   SDValue Root = DAG.getRoot();
5043   SDValue Base;
5044   SDValue Index;
5045   ISD::MemIndexType IndexType;
5046   SDValue Scale;
5047   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
5048                                     I.getParent(), VT.getScalarStoreSize());
5049   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
5050   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5051       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
5052       LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata(),
5053       Ranges);
5054 
5055   if (!UniformBase) {
5056     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5057     Index = getValue(Ptr);
5058     IndexType = ISD::SIGNED_SCALED;
5059     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5060   }
5061 
5062   EVT IdxVT = Index.getValueType();
5063   EVT EltTy = IdxVT.getVectorElementType();
5064   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
5065     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
5066     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
5067   }
5068 
5069   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
5070   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
5071                                        Ops, MMO, IndexType, ISD::NON_EXTLOAD);
5072 
5073   PendingLoads.push_back(Gather.getValue(1));
5074   setValue(&I, Gather);
5075 }
5076 
5077 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
5078   SDLoc dl = getCurSDLoc();
5079   AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
5080   AtomicOrdering FailureOrdering = I.getFailureOrdering();
5081   SyncScope::ID SSID = I.getSyncScopeID();
5082 
5083   SDValue InChain = getRoot();
5084 
5085   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
5086   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5087 
5088   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5089   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
5090 
5091   MachineFunction &MF = DAG.getMachineFunction();
5092   MachineMemOperand *MMO = MF.getMachineMemOperand(
5093       MachinePointerInfo(I.getPointerOperand()), Flags,
5094       LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT),
5095       AAMDNodes(), nullptr, SSID, SuccessOrdering, FailureOrdering);
5096 
5097   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
5098                                    dl, MemVT, VTs, InChain,
5099                                    getValue(I.getPointerOperand()),
5100                                    getValue(I.getCompareOperand()),
5101                                    getValue(I.getNewValOperand()), MMO);
5102 
5103   SDValue OutChain = L.getValue(2);
5104 
5105   setValue(&I, L);
5106   DAG.setRoot(OutChain);
5107 }
5108 
5109 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
5110   SDLoc dl = getCurSDLoc();
5111   ISD::NodeType NT;
5112   switch (I.getOperation()) {
5113   default: llvm_unreachable("Unknown atomicrmw operation");
5114   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
5115   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
5116   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
5117   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
5118   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
5119   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
5120   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
5121   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
5122   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
5123   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
5124   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
5125   case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
5126   case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
5127   case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break;
5128   case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break;
5129   case AtomicRMWInst::UIncWrap:
5130     NT = ISD::ATOMIC_LOAD_UINC_WRAP;
5131     break;
5132   case AtomicRMWInst::UDecWrap:
5133     NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
5134     break;
5135   case AtomicRMWInst::USubCond:
5136     NT = ISD::ATOMIC_LOAD_USUB_COND;
5137     break;
5138   case AtomicRMWInst::USubSat:
5139     NT = ISD::ATOMIC_LOAD_USUB_SAT;
5140     break;
5141   }
5142   AtomicOrdering Ordering = I.getOrdering();
5143   SyncScope::ID SSID = I.getSyncScopeID();
5144 
5145   SDValue InChain = getRoot();
5146 
5147   auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
5148   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5149   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
5150 
5151   MachineFunction &MF = DAG.getMachineFunction();
5152   MachineMemOperand *MMO = MF.getMachineMemOperand(
5153       MachinePointerInfo(I.getPointerOperand()), Flags,
5154       LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT),
5155       AAMDNodes(), nullptr, SSID, Ordering);
5156 
5157   SDValue L =
5158     DAG.getAtomic(NT, dl, MemVT, InChain,
5159                   getValue(I.getPointerOperand()), getValue(I.getValOperand()),
5160                   MMO);
5161 
5162   SDValue OutChain = L.getValue(1);
5163 
5164   setValue(&I, L);
5165   DAG.setRoot(OutChain);
5166 }
5167 
5168 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
5169   SDLoc dl = getCurSDLoc();
5170   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5171   SDValue Ops[3];
5172   Ops[0] = getRoot();
5173   Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
5174                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
5175   Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
5176                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
5177   SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
5178   setValue(&I, N);
5179   DAG.setRoot(N);
5180 }
5181 
5182 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
5183   SDLoc dl = getCurSDLoc();
5184   AtomicOrdering Order = I.getOrdering();
5185   SyncScope::ID SSID = I.getSyncScopeID();
5186 
5187   SDValue InChain = getRoot();
5188 
5189   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5190   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5191   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
5192 
5193   if (!TLI.supportsUnalignedAtomics() &&
5194       I.getAlign().value() < MemVT.getSizeInBits() / 8)
5195     report_fatal_error("Cannot generate unaligned atomic load");
5196 
5197   auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
5198 
5199   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5200       MachinePointerInfo(I.getPointerOperand()), Flags,
5201       LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(),
5202       nullptr, SSID, Order);
5203 
5204   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
5205 
5206   SDValue Ptr = getValue(I.getPointerOperand());
5207   SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
5208                             Ptr, MMO);
5209 
5210   SDValue OutChain = L.getValue(1);
5211   if (MemVT != VT)
5212     L = DAG.getPtrExtOrTrunc(L, dl, VT);
5213 
5214   setValue(&I, L);
5215   DAG.setRoot(OutChain);
5216 }
5217 
5218 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
5219   SDLoc dl = getCurSDLoc();
5220 
5221   AtomicOrdering Ordering = I.getOrdering();
5222   SyncScope::ID SSID = I.getSyncScopeID();
5223 
5224   SDValue InChain = getRoot();
5225 
5226   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5227   EVT MemVT =
5228       TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
5229 
5230   if (!TLI.supportsUnalignedAtomics() &&
5231       I.getAlign().value() < MemVT.getSizeInBits() / 8)
5232     report_fatal_error("Cannot generate unaligned atomic store");
5233 
5234   auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
5235 
5236   MachineFunction &MF = DAG.getMachineFunction();
5237   MachineMemOperand *MMO = MF.getMachineMemOperand(
5238       MachinePointerInfo(I.getPointerOperand()), Flags,
5239       LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(),
5240       nullptr, SSID, Ordering);
5241 
5242   SDValue Val = getValue(I.getValueOperand());
5243   if (Val.getValueType() != MemVT)
5244     Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5245   SDValue Ptr = getValue(I.getPointerOperand());
5246 
5247   SDValue OutChain =
5248       DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
5249 
5250   setValue(&I, OutChain);
5251   DAG.setRoot(OutChain);
5252 }
5253 
5254 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
5255 /// node.
5256 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
5257                                                unsigned Intrinsic) {
5258   // Ignore the callsite's attributes. A specific call site may be marked with
5259   // readnone, but the lowering code will expect the chain based on the
5260   // definition.
5261   const Function *F = I.getCalledFunction();
5262   bool HasChain = !F->doesNotAccessMemory();
5263   bool OnlyLoad =
5264       HasChain && F->onlyReadsMemory() && F->willReturn() && F->doesNotThrow();
5265 
5266   // Build the operand list.
5267   SmallVector<SDValue, 8> Ops;
5268   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
5269     if (OnlyLoad) {
5270       // We don't need to serialize loads against other loads.
5271       Ops.push_back(DAG.getRoot());
5272     } else {
5273       Ops.push_back(getRoot());
5274     }
5275   }
5276 
5277   // Info is set by getTgtMemIntrinsic
5278   TargetLowering::IntrinsicInfo Info;
5279   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5280   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
5281                                                DAG.getMachineFunction(),
5282                                                Intrinsic);
5283 
  // Add the intrinsic ID as an integer operand if it's not a target intrinsic
  // that lowers to a custom memory opcode.
5285   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
5286       Info.opc == ISD::INTRINSIC_W_CHAIN)
5287     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
5288                                         TLI.getPointerTy(DAG.getDataLayout())));
5289 
5290   // Add all operands of the call to the operand list.
5291   for (unsigned i = 0, e = I.arg_size(); i != e; ++i) {
5292     const Value *Arg = I.getArgOperand(i);
5293     if (!I.paramHasAttr(i, Attribute::ImmArg)) {
5294       Ops.push_back(getValue(Arg));
5295       continue;
5296     }
5297 
5298     // Use TargetConstant instead of a regular constant for immarg.
5299     EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
5300     if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5301       assert(CI->getBitWidth() <= 64 &&
5302              "large intrinsic immediates not handled");
5303       Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
5304     } else {
5305       Ops.push_back(
5306           DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
5307     }
5308   }
5309 
5310   SmallVector<EVT, 4> ValueVTs;
5311   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
5312 
5313   if (HasChain)
5314     ValueVTs.push_back(MVT::Other);
5315 
5316   SDVTList VTs = DAG.getVTList(ValueVTs);
5317 
5318   // Propagate fast-math-flags from IR to node(s).
5319   SDNodeFlags Flags;
5320   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
5321     Flags.copyFMF(*FPMO);
5322   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
5323 
5324   // Create the node.
5325   SDValue Result;
5326 
5327   if (auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl)) {
5328     auto *Token = Bundle->Inputs[0].get();
5329     SDValue ConvControlToken = getValue(Token);
5330     assert(Ops.back().getValueType() != MVT::Glue &&
           "Did not expect another glue node here.");
5332     ConvControlToken =
5333         DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
5334     Ops.push_back(ConvControlToken);
5335   }
5336 
5337   // In some cases, custom collection of operands from CallInst I may be needed.
5338   TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
5339   if (IsTgtIntrinsic) {
    // This is a target intrinsic that touches memory.
    //
    // TODO: We currently just fall back to address space 0 if
    //       getTgtMemIntrinsic didn't yield anything useful.
5344     MachinePointerInfo MPI;
5345     if (Info.ptrVal)
5346       MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
5347     else if (Info.fallbackAddressSpace)
5348       MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
5349     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops,
5350                                      Info.memVT, MPI, Info.align, Info.flags,
5351                                      Info.size, I.getAAMetadata());
5352   } else if (!HasChain) {
5353     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
5354   } else if (!I.getType()->isVoidTy()) {
5355     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
5356   } else {
5357     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
5358   }
5359 
5360   if (HasChain) {
5361     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
5362     if (OnlyLoad)
5363       PendingLoads.push_back(Chain);
5364     else
5365       DAG.setRoot(Chain);
5366   }
5367 
5368   if (!I.getType()->isVoidTy()) {
5369     if (!isa<VectorType>(I.getType()))
5370       Result = lowerRangeToAssertZExt(DAG, I, Result);
5371 
5372     MaybeAlign Alignment = I.getRetAlign();
5373 
5374     // Insert `assertalign` node if there's an alignment.
5375     if (InsertAssertAlign && Alignment) {
5376       Result =
5377           DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
5378     }
5379   }
5380 
5381   setValue(&I, Result);
5382 }
5383 
5384 /// GetSignificand - Get the significand and build it into a floating-point
5385 /// number with exponent of 1:
5386 ///
5387 ///   Op = (Op & 0x007fffff) | 0x3f800000;
5388 ///
/// where Op is the i32 bit representation of the floating-point value.
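///
/// For example, bits(3.0f) = 0x40400000, and
/// (0x40400000 & 0x007fffff) | 0x3f800000 = 0x3fc00000 = 1.5f: the
/// significand of 3.0f rescaled into [1,2).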
5390 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
5391   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5392                            DAG.getConstant(0x007fffff, dl, MVT::i32));
5393   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
5394                            DAG.getConstant(0x3f800000, dl, MVT::i32));
5395   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
5396 }
5397 
5398 /// GetExponent - Get the exponent:
5399 ///
5400 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
5401 ///
/// where Op is the i32 bit representation of the floating-point value.
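///
/// For example, bits(3.0f) = 0x40400000, and
/// ((0x40400000 & 0x7f800000) >> 23) - 127 = 128 - 127 = 1, the unbiased
/// exponent of 3.0f.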
5403 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
5404                            const TargetLowering &TLI, const SDLoc &dl) {
5405   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5406                            DAG.getConstant(0x7f800000, dl, MVT::i32));
5407   SDValue t1 = DAG.getNode(
5408       ISD::SRL, dl, MVT::i32, t0,
5409       DAG.getConstant(23, dl,
5410                       TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
5411   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
5412                            DAG.getConstant(127, dl, MVT::i32));
5413   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
5414 }
5415 
5416 /// getF32Constant - Get 32-bit floating point constant.
5417 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
5418                               const SDLoc &dl) {
5419   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
5420                            MVT::f32);
5421 }
5422 
5423 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
5424                                        SelectionDAG &DAG) {
5425   // TODO: What fast-math-flags should be set on the floating-point nodes?
5426 
  //   IntegerPartOfX = (int32_t)t0;
5428   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
5429 
5430   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
5431   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
5432   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
5433 
5434   //   IntegerPartOfX <<= 23;
5435   IntegerPartOfX =
5436       DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
5437                   DAG.getConstant(23, dl,
5438                                   DAG.getTargetLoweringInfo().getShiftAmountTy(
5439                                       MVT::i32, DAG.getDataLayout())));
5440 
5441   SDValue TwoToFractionalPartOfX;
5442   if (LimitFloatPrecision <= 6) {
5443     // For floating-point precision of 6:
5444     //
5445     //   TwoToFractionalPartOfX =
5446     //     0.997535578f +
5447     //       (0.735607626f + 0.252464424f * x) * x;
5448     //
5449     // error 0.0144103317, which is 6 bits
5450     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5451                              getF32Constant(DAG, 0x3e814304, dl));
5452     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5453                              getF32Constant(DAG, 0x3f3c50c8, dl));
5454     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5455     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5456                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
5457   } else if (LimitFloatPrecision <= 12) {
5458     // For floating-point precision of 12:
5459     //
5460     //   TwoToFractionalPartOfX =
5461     //     0.999892986f +
5462     //       (0.696457318f +
5463     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
5464     //
5465     // error 0.000107046256, which is 13 to 14 bits
5466     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5467                              getF32Constant(DAG, 0x3da235e3, dl));
5468     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5469                              getF32Constant(DAG, 0x3e65b8f3, dl));
5470     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5471     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5472                              getF32Constant(DAG, 0x3f324b07, dl));
5473     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5474     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5475                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
5476   } else { // LimitFloatPrecision <= 18
5477     // For floating-point precision of 18:
5478     //
5479     //   TwoToFractionalPartOfX =
5480     //     0.999999982f +
5481     //       (0.693148872f +
5482     //         (0.240227044f +
5483     //           (0.554906021e-1f +
5484     //             (0.961591928e-2f +
5485     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5486     // error 2.47208000*10^(-7), which is better than 18 bits
5487     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5488                              getF32Constant(DAG, 0x3924b03e, dl));
5489     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5490                              getF32Constant(DAG, 0x3ab24b87, dl));
5491     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5492     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5493                              getF32Constant(DAG, 0x3c1d8c17, dl));
5494     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5495     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5496                              getF32Constant(DAG, 0x3d634a1d, dl));
5497     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5498     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5499                              getF32Constant(DAG, 0x3e75fe14, dl));
5500     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5501     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5502                               getF32Constant(DAG, 0x3f317234, dl));
5503     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5504     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
5505                                          getF32Constant(DAG, 0x3f800000, dl));
5506   }
5507 
5508   // Add the exponent into the result in integer domain.
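  // IntegerPartOfX already sits in the exponent field (it was shifted left by
  // 23 above), so adding it to the bits of 2^FractionalPartOfX scales that
  // value by 2^IntegerPartOfX via exponent-field arithmetic.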
5509   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5510   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5511                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5512 }
5513 
5514 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
5515 /// limited-precision mode.
5516 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5517                          const TargetLowering &TLI, SDNodeFlags Flags) {
5518   if (Op.getValueType() == MVT::f32 &&
5519       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5520 
5521     // Put the exponent in the right bit position for later addition to the
5522     // final result:
5523     //
5524     // t0 = Op * log2(e)
5525 
5526     // TODO: What fast-math-flags should be set here?
5527     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5528                              DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5529     return getLimitedPrecisionExp2(t0, dl, DAG);
5530   }
5531 
5532   // No special expansion.
5533   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
5534 }
5535 
5536 /// expandLog - Lower a log intrinsic. Handles the special sequences for
5537 /// limited-precision mode.
5538 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5539                          const TargetLowering &TLI, SDNodeFlags Flags) {
5540   // TODO: What fast-math-flags should be set on the floating-point nodes?
5541 
5542   if (Op.getValueType() == MVT::f32 &&
5543       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5544     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5545 
5546     // Scale the exponent by log(2).
5547     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5548     SDValue LogOfExponent =
5549         DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5550                     DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
5551 
5552     // Get the significand and build it into a floating-point number with
5553     // exponent of 1.
5554     SDValue X = GetSignificand(DAG, Op1, dl);
5555 
5556     SDValue LogOfMantissa;
5557     if (LimitFloatPrecision <= 6) {
5558       // For floating-point precision of 6:
5559       //
5560       //   LogofMantissa =
5561       //     -1.1609546f +
5562       //       (1.4034025f - 0.23903021f * x) * x;
5563       //
5564       // error 0.0034276066, which is better than 8 bits
5565       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5566                                getF32Constant(DAG, 0xbe74c456, dl));
5567       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5568                                getF32Constant(DAG, 0x3fb3a2b1, dl));
5569       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5570       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5571                                   getF32Constant(DAG, 0x3f949a29, dl));
5572     } else if (LimitFloatPrecision <= 12) {
5573       // For floating-point precision of 12:
5574       //
5575       //   LogOfMantissa =
5576       //     -1.7417939f +
5577       //       (2.8212026f +
5578       //         (-1.4699568f +
5579       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5580       //
5581       // error 0.000061011436, which is 14 bits
5582       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5583                                getF32Constant(DAG, 0xbd67b6d6, dl));
5584       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5585                                getF32Constant(DAG, 0x3ee4f4b8, dl));
5586       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5587       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5588                                getF32Constant(DAG, 0x3fbc278b, dl));
5589       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5590       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5591                                getF32Constant(DAG, 0x40348e95, dl));
5592       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5593       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5594                                   getF32Constant(DAG, 0x3fdef31a, dl));
5595     } else { // LimitFloatPrecision <= 18
5596       // For floating-point precision of 18:
5597       //
5598       //   LogOfMantissa =
5599       //     -2.1072184f +
5600       //       (4.2372794f +
5601       //         (-3.7029485f +
5602       //           (2.2781945f +
5603       //             (-0.87823314f +
5604       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5605       //
5606       // error 0.0000023660568, which is better than 18 bits
5607       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5608                                getF32Constant(DAG, 0xbc91e5ac, dl));
5609       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5610                                getF32Constant(DAG, 0x3e4350aa, dl));
5611       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5612       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5613                                getF32Constant(DAG, 0x3f60d3e3, dl));
5614       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5615       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5616                                getF32Constant(DAG, 0x4011cdf0, dl));
5617       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5618       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5619                                getF32Constant(DAG, 0x406cfd1c, dl));
5620       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5621       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5622                                getF32Constant(DAG, 0x408797cb, dl));
5623       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5624       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5625                                   getF32Constant(DAG, 0x4006dcab, dl));
5626     }
5627 
5628     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5629   }
5630 
5631   // No special expansion.
5632   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
5633 }
5634 
5635 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5636 /// limited-precision mode.
5637 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5638                           const TargetLowering &TLI, SDNodeFlags Flags) {
5639   // TODO: What fast-math-flags should be set on the floating-point nodes?
5640 
5641   if (Op.getValueType() == MVT::f32 &&
5642       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5643     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5644 
5645     // Get the exponent.
5646     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5647 
5648     // Get the significand and build it into a floating-point number with
5649     // exponent of 1.
5650     SDValue X = GetSignificand(DAG, Op1, dl);
5651 
    // Different possible minimax approximations of the significand in
    // floating-point for various degrees of accuracy over [1,2].
5654     SDValue Log2ofMantissa;
5655     if (LimitFloatPrecision <= 6) {
5656       // For floating-point precision of 6:
5657       //
5658       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5659       //
5660       // error 0.0049451742, which is more than 7 bits
5661       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5662                                getF32Constant(DAG, 0xbeb08fe0, dl));
5663       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5664                                getF32Constant(DAG, 0x40019463, dl));
5665       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5666       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5667                                    getF32Constant(DAG, 0x3fd6633d, dl));
5668     } else if (LimitFloatPrecision <= 12) {
5669       // For floating-point precision of 12:
5670       //
5671       //   Log2ofMantissa =
5672       //     -2.51285454f +
5673       //       (4.07009056f +
5674       //         (-2.12067489f +
5675       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5676       //
5677       // error 0.0000876136000, which is better than 13 bits
5678       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5679                                getF32Constant(DAG, 0xbda7262e, dl));
5680       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5681                                getF32Constant(DAG, 0x3f25280b, dl));
5682       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5683       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5684                                getF32Constant(DAG, 0x4007b923, dl));
5685       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5686       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5687                                getF32Constant(DAG, 0x40823e2f, dl));
5688       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5689       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5690                                    getF32Constant(DAG, 0x4020d29c, dl));
5691     } else { // LimitFloatPrecision <= 18
5692       // For floating-point precision of 18:
5693       //
5694       //   Log2ofMantissa =
5695       //     -3.0400495f +
5696       //       (6.1129976f +
5697       //         (-5.3420409f +
5698       //           (3.2865683f +
5699       //             (-1.2669343f +
5700       //               (0.27515199f -
5701       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5702       //
5703       // error 0.0000018516, which is better than 18 bits
5704       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5705                                getF32Constant(DAG, 0xbcd2769e, dl));
5706       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5707                                getF32Constant(DAG, 0x3e8ce0b9, dl));
5708       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5709       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5710                                getF32Constant(DAG, 0x3fa22ae7, dl));
5711       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5712       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5713                                getF32Constant(DAG, 0x40525723, dl));
5714       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5715       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5716                                getF32Constant(DAG, 0x40aaf200, dl));
5717       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5718       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5719                                getF32Constant(DAG, 0x40c39dad, dl));
5720       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5721       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5722                                    getF32Constant(DAG, 0x4042902c, dl));
5723     }
5724 
5725     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5726   }
5727 
5728   // No special expansion.
5729   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5730 }
5731 
5732 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5733 /// limited-precision mode.
5734 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5735                            const TargetLowering &TLI, SDNodeFlags Flags) {
5736   // TODO: What fast-math-flags should be set on the floating-point nodes?
5737 
5738   if (Op.getValueType() == MVT::f32 &&
5739       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5740     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5741 
5742     // Scale the exponent by log10(2) [0.30102999f].
5743     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5744     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5745                                         getF32Constant(DAG, 0x3e9a209a, dl));
5746 
5747     // Get the significand and build it into a floating-point number with
5748     // exponent of 1.
5749     SDValue X = GetSignificand(DAG, Op1, dl);
5750 
5751     SDValue Log10ofMantissa;
5752     if (LimitFloatPrecision <= 6) {
5753       // For floating-point precision of 6:
5754       //
5755       //   Log10ofMantissa =
5756       //     -0.50419619f +
5757       //       (0.60948995f - 0.10380950f * x) * x;
5758       //
5759       // error 0.0014886165, which is 6 bits
5760       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5761                                getF32Constant(DAG, 0xbdd49a13, dl));
5762       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5763                                getF32Constant(DAG, 0x3f1c0789, dl));
5764       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5765       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5766                                     getF32Constant(DAG, 0x3f011300, dl));
5767     } else if (LimitFloatPrecision <= 12) {
5768       // For floating-point precision of 12:
5769       //
5770       //   Log10ofMantissa =
5771       //     -0.64831180f +
5772       //       (0.91751397f +
5773       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5774       //
5775       // error 0.00019228036, which is better than 12 bits
5776       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5777                                getF32Constant(DAG, 0x3d431f31, dl));
5778       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5779                                getF32Constant(DAG, 0x3ea21fb2, dl));
5780       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5781       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5782                                getF32Constant(DAG, 0x3f6ae232, dl));
5783       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5784       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5785                                     getF32Constant(DAG, 0x3f25f7c3, dl));
5786     } else { // LimitFloatPrecision <= 18
5787       // For floating-point precision of 18:
5788       //
5789       //   Log10ofMantissa =
5790       //     -0.84299375f +
5791       //       (1.5327582f +
5792       //         (-1.0688956f +
5793       //           (0.49102474f +
5794       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5795       //
5796       // error 0.0000037995730, which is better than 18 bits
5797       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5798                                getF32Constant(DAG, 0x3c5d51ce, dl));
5799       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5800                                getF32Constant(DAG, 0x3e00685a, dl));
5801       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5802       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5803                                getF32Constant(DAG, 0x3efb6798, dl));
5804       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5805       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5806                                getF32Constant(DAG, 0x3f88d192, dl));
5807       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5808       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5809                                getF32Constant(DAG, 0x3fc4316c, dl));
5810       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5811       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5812                                     getF32Constant(DAG, 0x3f57ce70, dl));
5813     }
5814 
5815     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5816   }
5817 
5818   // No special expansion.
5819   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5820 }
5821 
5822 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5823 /// limited-precision mode.
5824 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5825                           const TargetLowering &TLI, SDNodeFlags Flags) {
5826   if (Op.getValueType() == MVT::f32 &&
5827       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5828     return getLimitedPrecisionExp2(Op, dl, DAG);
5829 
5830   // No special expansion.
5831   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5832 }
5833 
/// expandPow - Lower a pow intrinsic. Handles the special sequences for
/// limited-precision mode when the base is exactly 10.0f.
5836 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5837                          SelectionDAG &DAG, const TargetLowering &TLI,
5838                          SDNodeFlags Flags) {
5839   bool IsExp10 = false;
5840   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5841       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5842     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5843       APFloat Ten(10.0f);
5844       IsExp10 = LHSC->isExactlyValue(Ten);
5845     }
5846   }
5847 
5848   // TODO: What fast-math-flags should be set on the FMUL node?
5849   if (IsExp10) {
5850     // Put the exponent in the right bit position for later addition to the
5851     // final result:
5852     //
5853     //   #define LOG2OF10 3.3219281f
5854     //   t0 = Op * LOG2OF10;
5855     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5856                              getF32Constant(DAG, 0x40549a78, dl));
5857     return getLimitedPrecisionExp2(t0, dl, DAG);
5858   }
5859 
5860   // No special expansion.
5861   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5862 }
5863 
5864 /// ExpandPowI - Expand a llvm.powi intrinsic.
5865 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5866                           SelectionDAG &DAG) {
5867   // If RHS is a constant, we can expand this out to a multiplication tree if
5868   // it's beneficial on the target, otherwise we end up lowering to a call to
5869   // __powidf2 (for example).
5870   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5871     unsigned Val = RHSC->getSExtValue();
5872 
5873     // powi(x, 0) -> 1.0
5874     if (Val == 0)
5875       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5876 
5877     if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI(
5878             Val, DAG.shouldOptForSize())) {
5879       // Get the exponent as a positive value.
5880       if ((int)Val < 0)
5881         Val = -Val;
5882       // We use the simple binary decomposition method to generate the multiply
5883       // sequence.  There are more optimal ways to do this (for example,
5884       // powi(x,15) generates one more multiply than it should), but this has
5885       // the benefit of being both really simple and much better than a libcall.
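      //
      // Illustrative walk-through: for powi(x, 11), 11 = 0b1011, so the loop
      // below visits CurSquare = x, x^2, x^4, x^8 and folds in the squares
      // whose bit is set:
      //   Res = x * x^2 * x^8 = x^11
      // using a handful of FMULs instead of a libcall.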
5886       SDValue Res; // Logically starts equal to 1.0
5887       SDValue CurSquare = LHS;
5888       // TODO: Intrinsics should have fast-math-flags that propagate to these
5889       // nodes.
5890       while (Val) {
5891         if (Val & 1) {
5892           if (Res.getNode())
5893             Res =
5894                 DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5895           else
5896             Res = CurSquare; // 1.0*CurSquare.
5897         }
5898 
5899         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5900                                 CurSquare, CurSquare);
5901         Val >>= 1;
5902       }
5903 
      // If the original exponent was negative, invert the result to get
      // x^-Val (e.g. 1/(x*x*x) for powi(x, -3)).
5905       if (RHSC->getSExtValue() < 0)
5906         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5907                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5908       return Res;
5909     }
5910   }
5911 
5912   // Otherwise, expand to a libcall.
5913   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5914 }
5915 
5916 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5917                             SDValue LHS, SDValue RHS, SDValue Scale,
5918                             SelectionDAG &DAG, const TargetLowering &TLI) {
5919   EVT VT = LHS.getValueType();
5920   bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5921   bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5922   LLVMContext &Ctx = *DAG.getContext();
5923 
5924   // If the type is legal but the operation isn't, this node might survive all
5925   // the way to operation legalization. If we end up there and we do not have
5926   // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5927   // node.
5928 
5929   // Coax the legalizer into expanding the node during type legalization instead
5930   // by bumping the size by one bit. This will force it to Promote, enabling the
5931   // early expansion and avoiding the need to expand later.
5932 
5933   // We don't have to do this if Scale is 0; that can always be expanded, unless
5934   // it's a saturating signed operation. Those can experience true integer
5935   // division overflow, a case which we must avoid.
5936 
5937   // FIXME: We wouldn't have to do this (or any of the early
5938   // expansion/promotion) if it was possible to expand a libcall of an
5939   // illegal type during operation legalization. But it's not, so things
5940   // get a bit hacky.
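  //
  // Illustrative example (assuming i32 is legal on the target): an i32
  // sdiv.fix.sat whose operation action is neither Legal nor Custom is
  // rebuilt here on i33 operands; i33 is illegal, so type legalization
  // promotes it, enabling the early expansion described above. Note that
  // even with Scale == 0 a signed saturating divide can overflow
  // (INT_MIN / -1), which is why Saturating && Signed also takes this path.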
5941   unsigned ScaleInt = Scale->getAsZExtVal();
5942   if ((ScaleInt > 0 || (Saturating && Signed)) &&
5943       (TLI.isTypeLegal(VT) ||
5944        (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5945     TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5946         Opcode, VT, ScaleInt);
5947     if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5948       EVT PromVT;
5949       if (VT.isScalarInteger())
5950         PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5951       else if (VT.isVector()) {
5952         PromVT = VT.getVectorElementType();
5953         PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5954         PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5955       } else
5956         llvm_unreachable("Wrong VT for DIVFIX?");
5957       LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
5958       RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
5959       EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5960       // For saturating operations, we need to shift up the LHS to get the
5961       // proper saturation width, and then shift down again afterwards.
5962       if (Saturating)
5963         LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5964                           DAG.getConstant(1, DL, ShiftTy));
5965       SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5966       if (Saturating)
5967         Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5968                           DAG.getConstant(1, DL, ShiftTy));
5969       return DAG.getZExtOrTrunc(Res, DL, VT);
5970     }
5971   }
5972 
5973   return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5974 }
5975 
5976 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5977 // bitcasted, or split argument. Returns a list of <Register, size in bits>
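//
// Illustrative example (virtual register names hypothetical): an i64 argument
// split by the calling convention into two i32 halves typically arrives as
// BUILD_PAIR(CopyFromReg %0, CopyFromReg %1), for which this returns
// {<%0, 32>, <%1, 32>}.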
5978 static void
5979 getUnderlyingArgRegs(SmallVectorImpl<std::pair<Register, TypeSize>> &Regs,
5980                      const SDValue &N) {
5981   switch (N.getOpcode()) {
5982   case ISD::CopyFromReg: {
5983     SDValue Op = N.getOperand(1);
5984     Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5985                       Op.getValueType().getSizeInBits());
5986     return;
5987   }
5988   case ISD::BITCAST:
5989   case ISD::AssertZext:
5990   case ISD::AssertSext:
5991   case ISD::TRUNCATE:
5992     getUnderlyingArgRegs(Regs, N.getOperand(0));
5993     return;
5994   case ISD::BUILD_PAIR:
5995   case ISD::BUILD_VECTOR:
5996   case ISD::CONCAT_VECTORS:
5997     for (SDValue Op : N->op_values())
5998       getUnderlyingArgRegs(Regs, Op);
5999     return;
6000   default:
6001     return;
6002   }
6003 }
6004 
6005 /// If the DbgValueInst is a dbg_value of a function argument, create the
/// corresponding DBG_VALUE machine instruction for it now. At the end of
/// instruction selection, such instructions are inserted into the entry BB.
6008 /// We don't currently support this for variadic dbg_values, as they shouldn't
6009 /// appear for function arguments or in the prologue.
6010 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6011     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
6012     DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
6013   const Argument *Arg = dyn_cast<Argument>(V);
6014   if (!Arg)
6015     return false;
6016 
6017   MachineFunction &MF = DAG.getMachineFunction();
6018   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6019 
6020   // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
6021   // we've been asked to pursue.
6022   auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
6023                               bool Indirect) {
6024     if (Reg.isVirtual() && MF.useDebugInstrRef()) {
6025       // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
6026       // pointing at the VReg, which will be patched up later.
6027       auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
6028       SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
6029           /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
6030           /* isKill */ false, /* isDead */ false,
6031           /* isUndef */ false, /* isEarlyClobber */ false,
6032           /* SubReg */ 0, /* isDebug */ true)});
6033 
6034       auto *NewDIExpr = FragExpr;
6035       // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
6036       // the DIExpression.
6037       if (Indirect)
6038         NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
6039       SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
6040       NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
6041       return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
6042     } else {
6043       // Create a completely standard DBG_VALUE.
6044       auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
6045       return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
6046     }
6047   };
6048 
6049   if (Kind == FuncArgumentDbgValueKind::Value) {
6050     // ArgDbgValues are hoisted to the beginning of the entry block. So we
6051     // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
6052     // the entry block.
6053     bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
6054     if (!IsInEntryBlock)
6055       return false;
6056 
    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
    // variable that also is a param.
    //
    // Although, if we are at the top of the entry block already, we can still
    // emit using ArgDbgValue. This might catch some situations when the
    // dbg.value refers to an argument that isn't used in the entry block, so
    // any CopyToReg node would be optimized out and the only way to express
    // this DBG_VALUE is by using the physical reg (or FI) as done in this
    // method.
6070     bool VariableIsFunctionInputArg = Variable->isParameter() &&
6071         !DL->getInlinedAt();
6072     bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
6073     if (!IsInPrologue && !VariableIsFunctionInputArg)
6074       return false;
6075 
6076     // Here we assume that a function argument on IR level only can be used to
6077     // describe one input parameter on source level. If we for example have
6078     // source code like this
6079     //
6080     //    struct A { long x, y; };
6081     //    void foo(struct A a, long b) {
6082     //      ...
6083     //      b = a.x;
6084     //      ...
6085     //    }
6086     //
6087     // and IR like this
6088     //
6089     //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
6090     //  entry:
6091     //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
6092     //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
6093     //    call void @llvm.dbg.value(metadata i32 %b, "b",
6094     //    ...
6095     //    call void @llvm.dbg.value(metadata i32 %a1, "b"
6096     //    ...
6097     //
6098     // then the last dbg.value is describing a parameter "b" using a value that
    // is an argument. But since we have already used %a1 to describe a parameter
6100     // we should not handle that last dbg.value here (that would result in an
6101     // incorrect hoisting of the DBG_VALUE to the function entry).
6102     // Notice that we allow one dbg.value per IR level argument, to accommodate
6103     // for the situation with fragments above.
6104     // If there is no node for the value being handled, we return true to skip
6105     // the normal generation of debug info, as it would kill existing debug
6106     // info for the parameter in case of duplicates.
6107     if (VariableIsFunctionInputArg) {
6108       unsigned ArgNo = Arg->getArgNo();
6109       if (ArgNo >= FuncInfo.DescribedArgs.size())
6110         FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
6111       else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
6112         return !NodeMap[V].getNode();
6113       FuncInfo.DescribedArgs.set(ArgNo);
6114     }
6115   }
6116 
6117   bool IsIndirect = false;
6118   std::optional<MachineOperand> Op;
6119   // Some arguments' frame index is recorded during argument lowering.
6120   int FI = FuncInfo.getArgumentFrameIndex(Arg);
6121   if (FI != std::numeric_limits<int>::max())
6122     Op = MachineOperand::CreateFI(FI);
6123 
6124   SmallVector<std::pair<Register, TypeSize>, 8> ArgRegsAndSizes;
6125   if (!Op && N.getNode()) {
6126     getUnderlyingArgRegs(ArgRegsAndSizes, N);
6127     Register Reg;
6128     if (ArgRegsAndSizes.size() == 1)
6129       Reg = ArgRegsAndSizes.front().first;
6130 
6131     if (Reg && Reg.isVirtual()) {
6132       MachineRegisterInfo &RegInfo = MF.getRegInfo();
6133       Register PR = RegInfo.getLiveInPhysReg(Reg);
6134       if (PR)
6135         Reg = PR;
6136     }
6137     if (Reg) {
6138       Op = MachineOperand::CreateReg(Reg, false);
6139       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
6140     }
6141   }
6142 
6143   if (!Op && N.getNode()) {
6144     // Check if frame index is available.
6145     SDValue LCandidate = peekThroughBitcasts(N);
6146     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
6147       if (FrameIndexSDNode *FINode =
6148           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
6149         Op = MachineOperand::CreateFI(FINode->getIndex());
6150   }
6151 
6152   if (!Op) {
6153     // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
6154     auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<Register, TypeSize>>
6155                                          SplitRegs) {
6156       unsigned Offset = 0;
6157       for (const auto &RegAndSize : SplitRegs) {
6158         // If the expression is already a fragment, the current register
6159         // offset+size might extend beyond the fragment. In this case, only
6160         // the register bits that are inside the fragment are relevant.
6161         int RegFragmentSizeInBits = RegAndSize.second;
6162         if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
6163           uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6164           // The register is entirely outside the expression fragment,
6165           // so is irrelevant for debug info.
6166           if (Offset >= ExprFragmentSizeInBits)
6167             break;
6168           // The register is partially outside the expression fragment, only
6169           // the low bits within the fragment are relevant for debug info.
6170           if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6171             RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
6172           }
6173         }
6174 
6175         auto FragmentExpr = DIExpression::createFragmentExpression(
6176             Expr, Offset, RegFragmentSizeInBits);
6177         Offset += RegAndSize.second;
6178         // If a valid fragment expression cannot be created, the variable's
6179         // correct value cannot be determined and so it is set as Undef.
6180         if (!FragmentExpr) {
6181           SDDbgValue *SDV = DAG.getConstantDbgValue(
6182               Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
6183           DAG.AddDbgValue(SDV, false);
6184           continue;
6185         }
6186         MachineInstr *NewMI =
6187             MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6188                              Kind != FuncArgumentDbgValueKind::Value);
6189         FuncInfo.ArgDbgValues.push_back(NewMI);
6190       }
6191     };
6192 
6193     // Check if ValueMap has reg number.
6194     DenseMap<const Value *, Register>::const_iterator
6195       VMI = FuncInfo.ValueMap.find(V);
6196     if (VMI != FuncInfo.ValueMap.end()) {
6197       const auto &TLI = DAG.getTargetLoweringInfo();
6198       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
6199                        V->getType(), std::nullopt);
6200       if (RFV.occupiesMultipleRegs()) {
6201         splitMultiRegDbgValue(RFV.getRegsAndSizes());
6202         return true;
6203       }
6204 
6205       Op = MachineOperand::CreateReg(VMI->second, false);
6206       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
6207     } else if (ArgRegsAndSizes.size() > 1) {
6208       // This was split due to the calling convention, and no virtual register
6209       // mapping exists for the value.
6210       splitMultiRegDbgValue(ArgRegsAndSizes);
6211       return true;
6212     }
6213   }
6214 
6215   if (!Op)
6216     return false;
6217 
6218   assert(Variable->isValidLocationForIntrinsic(DL) &&
6219          "Expected inlined-at fields to agree");
6220   MachineInstr *NewMI = nullptr;
6221 
6222   if (Op->isReg())
6223     NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
6224   else
6225     NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
6226                     Variable, Expr);
6227 
  // Record the new instruction; ArgDbgValues are emitted at the start of the
  // entry block after instruction selection.
6229   FuncInfo.ArgDbgValues.push_back(NewMI);
6230   return true;
6231 }
6232 
6233 /// Return the appropriate SDDbgValue based on N.
6234 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
6235                                              DILocalVariable *Variable,
6236                                              DIExpression *Expr,
6237                                              const DebugLoc &dl,
6238                                              unsigned DbgSDNodeOrder) {
6239   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
6240     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
6241     // stack slot locations.
6242     //
6243     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
6244     // debug values here after optimization:
6245     //
6246     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
6247     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
6248     //
6249     // Both describe the direct values of their associated variables.
6250     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6251                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
6252   }
6253   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
6254                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
6255 }
6256 
6257 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
6258   switch (Intrinsic) {
6259   case Intrinsic::smul_fix:
6260     return ISD::SMULFIX;
6261   case Intrinsic::umul_fix:
6262     return ISD::UMULFIX;
6263   case Intrinsic::smul_fix_sat:
6264     return ISD::SMULFIXSAT;
6265   case Intrinsic::umul_fix_sat:
6266     return ISD::UMULFIXSAT;
6267   case Intrinsic::sdiv_fix:
6268     return ISD::SDIVFIX;
6269   case Intrinsic::udiv_fix:
6270     return ISD::UDIVFIX;
6271   case Intrinsic::sdiv_fix_sat:
6272     return ISD::SDIVFIXSAT;
6273   case Intrinsic::udiv_fix_sat:
6274     return ISD::UDIVFIXSAT;
6275   default:
6276     llvm_unreachable("Unhandled fixed point intrinsic");
6277   }
6278 }
6279 
void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
                                                    const char *FunctionName) {
6282   assert(FunctionName && "FunctionName must not be nullptr");
6283   SDValue Callee = DAG.getExternalSymbol(
6284       FunctionName,
6285       DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
6286   LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
6287 }
6288 
6289 /// Given a @llvm.call.preallocated.setup, return the corresponding
6290 /// preallocated call.
6291 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
6292   assert(cast<CallBase>(PreallocatedSetup)
6293                  ->getCalledFunction()
6294                  ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
6295          "expected call_preallocated_setup Value");
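       // For example (illustrative IR, following the LangRef "preallocated"
       // pattern):
       //   %t = call token @llvm.call.preallocated.setup(i32 1)
       //   %a = call ptr @llvm.call.preallocated.arg(token %t, i32 0)
       //            preallocated(i32) ["preallocated"(token %t)]
       //   call void @foo(ptr preallocated(i32) %a) ["preallocated"(token %t)]
       // The call to @foo is the preallocated call corresponding to %t.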
6296   for (const auto *U : PreallocatedSetup->users()) {
6297     auto *UseCall = cast<CallBase>(U);
6298     const Function *Fn = UseCall->getCalledFunction();
6299     if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6300       return UseCall;
6301     }
6302   }
6303   llvm_unreachable("expected corresponding call to preallocated setup/arg");
6304 }
6305 
6306 /// If DI is a debug value with an EntryValue expression, lower it using the
6307 /// corresponding physical register of the associated Argument value
6308 /// (guaranteed to exist by the verifier).
6309 bool SelectionDAGBuilder::visitEntryValueDbgValue(
6310     ArrayRef<const Value *> Values, DILocalVariable *Variable,
6311     DIExpression *Expr, DebugLoc DbgLoc) {
6312   if (!Expr->isEntryValue() || !hasSingleElement(Values))
6313     return false;
6314 
6315   // These properties are guaranteed by the verifier.
6316   const Argument *Arg = cast<Argument>(Values[0]);
6317   assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
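       // For example (illustrative):
       //   dbg.value(ptr %ctx, !"ctx", !DIExpression(DW_OP_LLVM_entry_value, 1))
       // refers to the value the swiftasync argument held on entry to the
       // function, which is recoverable from a known physical register.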
6318 
6319   auto ArgIt = FuncInfo.ValueMap.find(Arg);
6320   if (ArgIt == FuncInfo.ValueMap.end()) {
6321     LLVM_DEBUG(
6322         dbgs() << "Dropping dbg.value: expression is entry_value but "
6323                   "couldn't find an associated register for the Argument\n");
6324     return true;
6325   }
6326   Register ArgVReg = ArgIt->getSecond();
6327 
6328   for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
6329     if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6330       SDDbgValue *SDV = DAG.getVRegDbgValue(
6331           Variable, Expr, PhysReg, false /*IsIndirect*/, DbgLoc, SDNodeOrder);
6332       DAG.AddDbgValue(SDV, /*isParameter=*/false);
6333       return true;
6334     }
6335   LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
6336                        "couldn't find a physical register\n");
6337   return true;
6338 }
6339 
6340 /// Lower a call to one of the experimental convergence control intrinsics.
6341 void SelectionDAGBuilder::visitConvergenceControl(const CallInst &I,
6342                                                   unsigned Intrinsic) {
6343   SDLoc sdl = getCurSDLoc();
6344   switch (Intrinsic) {
6345   case Intrinsic::experimental_convergence_anchor:
6346     setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
6347     break;
6348   case Intrinsic::experimental_convergence_entry:
6349     setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
6350     break;
6351   case Intrinsic::experimental_convergence_loop: {
6352     auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl);
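         // The IR verifier requires a "convergencectrl" bundle on the loop
         // intrinsic, so Bundle is always populated here.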
6353     auto *Token = Bundle->Inputs[0].get();
6354     setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
6355                              getValue(Token)));
6356     break;
6357   }
6358   }
6359 }
6360 
6361 void SelectionDAGBuilder::visitVectorHistogram(const CallInst &I,
6362                                                unsigned IntrinsicID) {
6363   // For now, we're only lowering an 'add' histogram.
6364   // We can add others later, e.g. saturating adds, min/max.
6365   assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6366          "Tried to lower unsupported histogram type");
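       // For example (illustrative IR):
       //   call void @llvm.experimental.vector.histogram.add.nxv2p0.i64(
       //       <vscale x 2 x ptr> %buckets, i64 1, <vscale x 2 x i1> %mask)
       // increments each bucket addressed by %buckets by 1, under %mask.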
6367   SDLoc sdl = getCurSDLoc();
6368   Value *Ptr = I.getOperand(0);
6369   SDValue Inc = getValue(I.getOperand(1));
6370   SDValue Mask = getValue(I.getOperand(2));
6371 
6372   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6373   const DataLayout &TargetDL = DAG.getDataLayout();
6374   EVT VT = Inc.getValueType();
6375   Align Alignment = DAG.getEVTAlign(VT);
6376 
6377   const MDNode *Ranges = getRangeMetadata(I);
6378 
6379   SDValue Root = DAG.getRoot();
6380   SDValue Base;
6381   SDValue Index;
6382   ISD::MemIndexType IndexType;
6383   SDValue Scale;
6384   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
6385                                     I.getParent(), VT.getScalarStoreSize());
6386 
6387   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
6388 
6389   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6390       MachinePointerInfo(AS),
6391       MachineMemOperand::MOLoad | MachineMemOperand::MOStore,
6392       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);
6393 
6394   if (!UniformBase) {
6395     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
6396     Index = getValue(Ptr);
6397     IndexType = ISD::SIGNED_SCALED;
6398     Scale =
6399         DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
6400   }
6401 
6402   EVT IdxVT = Index.getValueType();
6403   EVT EltTy = IdxVT.getVectorElementType();
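       // Note that shouldExtendGSIndex takes EltTy by reference and may widen
       // it, so the changeVectorElementType below is not a no-op when it
       // returns true.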
6404   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
6405     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
6406     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
6407   }
6408 
6409   SDValue ID = DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6410 
6411   SDValue Ops[] = {Root, Inc, Mask, Base, Index, Scale, ID};
6412   SDValue Histogram = DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), VT, sdl,
6413                                              Ops, MMO, IndexType);
6414 
6415   setValue(&I, Histogram);
6416   DAG.setRoot(Histogram);
6417 }
6418 
6419 /// Lower the call to the specified intrinsic function.
6420 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
6421                                              unsigned Intrinsic) {
6422   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6423   SDLoc sdl = getCurSDLoc();
6424   DebugLoc dl = getCurDebugLoc();
6425   SDValue Res;
6426 
6427   SDNodeFlags Flags;
6428   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
6429     Flags.copyFMF(*FPOp);
6430 
6431   switch (Intrinsic) {
6432   default:
6433     // By default, turn this into a target intrinsic node.
6434     visitTargetIntrinsic(I, Intrinsic);
6435     return;
6436   case Intrinsic::vscale: {
6437     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6438     setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
6439     return;
6440   }
6441   case Intrinsic::vastart:  visitVAStart(I); return;
6442   case Intrinsic::vaend:    visitVAEnd(I); return;
6443   case Intrinsic::vacopy:   visitVACopy(I); return;
6444   case Intrinsic::returnaddress:
6445     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
6446                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6447                              getValue(I.getArgOperand(0))));
6448     return;
6449   case Intrinsic::addressofreturnaddress:
6450     setValue(&I,
6451              DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
6452                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6453     return;
6454   case Intrinsic::sponentry:
6455     setValue(&I,
6456              DAG.getNode(ISD::SPONENTRY, sdl,
6457                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6458     return;
6459   case Intrinsic::frameaddress:
6460     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
6461                              TLI.getFrameIndexTy(DAG.getDataLayout()),
6462                              getValue(I.getArgOperand(0))));
6463     return;
6464   case Intrinsic::read_volatile_register:
6465   case Intrinsic::read_register: {
6466     Value *Reg = I.getArgOperand(0);
6467     SDValue Chain = getRoot();
6468     SDValue RegName =
6469         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6470     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6471     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
6472       DAG.getVTList(VT, MVT::Other), Chain, RegName);
6473     setValue(&I, Res);
6474     DAG.setRoot(Res.getValue(1));
6475     return;
6476   }
6477   case Intrinsic::write_register: {
6478     Value *Reg = I.getArgOperand(0);
6479     Value *RegValue = I.getArgOperand(1);
6480     SDValue Chain = getRoot();
6481     SDValue RegName =
6482         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6483     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
6484                             RegName, getValue(RegValue)));
6485     return;
6486   }
6487   case Intrinsic::memcpy: {
6488     const auto &MCI = cast<MemCpyInst>(I);
6489     SDValue Op1 = getValue(I.getArgOperand(0));
6490     SDValue Op2 = getValue(I.getArgOperand(1));
6491     SDValue Op3 = getValue(I.getArgOperand(2));
6492     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
6493     Align DstAlign = MCI.getDestAlign().valueOrOne();
6494     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6495     Align Alignment = std::min(DstAlign, SrcAlign);
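         // E.g. a copy with a 16-byte-aligned dest and a 4-byte-aligned source
         // is currently emitted with alignment 4; see the FIXME below.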
6496     bool isVol = MCI.isVolatile();
6497     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6498     // node.
6499     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6500     SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6501                                /* AlwaysInline */ false, &I, std::nullopt,
6502                                MachinePointerInfo(I.getArgOperand(0)),
6503                                MachinePointerInfo(I.getArgOperand(1)),
6504                                I.getAAMetadata(), AA);
6505     updateDAGForMaybeTailCall(MC);
6506     return;
6507   }
6508   case Intrinsic::memcpy_inline: {
6509     const auto &MCI = cast<MemCpyInlineInst>(I);
6510     SDValue Dst = getValue(I.getArgOperand(0));
6511     SDValue Src = getValue(I.getArgOperand(1));
6512     SDValue Size = getValue(I.getArgOperand(2));
6513     assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
6514     // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
6515     Align DstAlign = MCI.getDestAlign().valueOrOne();
6516     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6517     Align Alignment = std::min(DstAlign, SrcAlign);
6518     bool isVol = MCI.isVolatile();
6519     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6520     // node.
6521     SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
6522                                /* AlwaysInline */ true, &I, std::nullopt,
6523                                MachinePointerInfo(I.getArgOperand(0)),
6524                                MachinePointerInfo(I.getArgOperand(1)),
6525                                I.getAAMetadata(), AA);
6526     updateDAGForMaybeTailCall(MC);
6527     return;
6528   }
6529   case Intrinsic::memset: {
6530     const auto &MSI = cast<MemSetInst>(I);
6531     SDValue Op1 = getValue(I.getArgOperand(0));
6532     SDValue Op2 = getValue(I.getArgOperand(1));
6533     SDValue Op3 = getValue(I.getArgOperand(2));
6534     // @llvm.memset defines 0 and 1 to both mean no alignment.
6535     Align Alignment = MSI.getDestAlign().valueOrOne();
6536     bool isVol = MSI.isVolatile();
6537     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6538     SDValue MS = DAG.getMemset(
6539         Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
6540         &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
6541     updateDAGForMaybeTailCall(MS);
6542     return;
6543   }
6544   case Intrinsic::memset_inline: {
6545     const auto &MSII = cast<MemSetInlineInst>(I);
6546     SDValue Dst = getValue(I.getArgOperand(0));
6547     SDValue Value = getValue(I.getArgOperand(1));
6548     SDValue Size = getValue(I.getArgOperand(2));
6549     assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
6550     // @llvm.memset defines 0 and 1 to both mean no alignment.
6551     Align DstAlign = MSII.getDestAlign().valueOrOne();
6552     bool isVol = MSII.isVolatile();
6553     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6554     SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
6555                                /* AlwaysInline */ true, &I,
6556                                MachinePointerInfo(I.getArgOperand(0)),
6557                                I.getAAMetadata());
6558     updateDAGForMaybeTailCall(MC);
6559     return;
6560   }
6561   case Intrinsic::memmove: {
6562     const auto &MMI = cast<MemMoveInst>(I);
6563     SDValue Op1 = getValue(I.getArgOperand(0));
6564     SDValue Op2 = getValue(I.getArgOperand(1));
6565     SDValue Op3 = getValue(I.getArgOperand(2));
6566     // @llvm.memmove defines 0 and 1 to both mean no alignment.
6567     Align DstAlign = MMI.getDestAlign().valueOrOne();
6568     Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6569     Align Alignment = std::min(DstAlign, SrcAlign);
6570     bool isVol = MMI.isVolatile();
6571     // FIXME: Support passing different dest/src alignments to the memmove DAG
6572     // node.
6573     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6574     SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &I,
6575                                 /* OverrideTailCall */ std::nullopt,
6576                                 MachinePointerInfo(I.getArgOperand(0)),
6577                                 MachinePointerInfo(I.getArgOperand(1)),
6578                                 I.getAAMetadata(), AA);
6579     updateDAGForMaybeTailCall(MM);
6580     return;
6581   }
6582   case Intrinsic::memcpy_element_unordered_atomic: {
6583     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
6584     SDValue Dst = getValue(MI.getRawDest());
6585     SDValue Src = getValue(MI.getRawSource());
6586     SDValue Length = getValue(MI.getLength());
6587 
6588     Type *LengthTy = MI.getLength()->getType();
6589     unsigned ElemSz = MI.getElementSizeInBytes();
6590     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6591     SDValue MC =
6592         DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6593                             isTC, MachinePointerInfo(MI.getRawDest()),
6594                             MachinePointerInfo(MI.getRawSource()));
6595     updateDAGForMaybeTailCall(MC);
6596     return;
6597   }
6598   case Intrinsic::memmove_element_unordered_atomic: {
6599     auto &MI = cast<AtomicMemMoveInst>(I);
6600     SDValue Dst = getValue(MI.getRawDest());
6601     SDValue Src = getValue(MI.getRawSource());
6602     SDValue Length = getValue(MI.getLength());
6603 
6604     Type *LengthTy = MI.getLength()->getType();
6605     unsigned ElemSz = MI.getElementSizeInBytes();
6606     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6607     SDValue MC =
6608         DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6609                              isTC, MachinePointerInfo(MI.getRawDest()),
6610                              MachinePointerInfo(MI.getRawSource()));
6611     updateDAGForMaybeTailCall(MC);
6612     return;
6613   }
6614   case Intrinsic::memset_element_unordered_atomic: {
6615     auto &MI = cast<AtomicMemSetInst>(I);
6616     SDValue Dst = getValue(MI.getRawDest());
6617     SDValue Val = getValue(MI.getValue());
6618     SDValue Length = getValue(MI.getLength());
6619 
6620     Type *LengthTy = MI.getLength()->getType();
6621     unsigned ElemSz = MI.getElementSizeInBytes();
6622     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6623     SDValue MC =
6624         DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
6625                             isTC, MachinePointerInfo(MI.getRawDest()));
6626     updateDAGForMaybeTailCall(MC);
6627     return;
6628   }
6629   case Intrinsic::call_preallocated_setup: {
6630     const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6631     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6632     SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6633                               getRoot(), SrcValue);
6634     setValue(&I, Res);
6635     DAG.setRoot(Res);
6636     return;
6637   }
6638   case Intrinsic::call_preallocated_arg: {
6639     const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6640     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6641     SDValue Ops[3];
6642     Ops[0] = getRoot();
6643     Ops[1] = SrcValue;
6644     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6645                                    MVT::i32); // arg index
6646     SDValue Res = DAG.getNode(
6647         ISD::PREALLOCATED_ARG, sdl,
6648         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6649     setValue(&I, Res);
6650     DAG.setRoot(Res.getValue(1));
6651     return;
6652   }
6653   case Intrinsic::dbg_declare: {
6654     const auto &DI = cast<DbgDeclareInst>(I);
6655     // Debug intrinsics are handled separately in assignment tracking mode.
6656     // Some intrinsics are handled right after Argument lowering.
6657     if (AssignmentTrackingEnabled ||
6658         FuncInfo.PreprocessedDbgDeclares.count(&DI))
6659       return;
6660     LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DI << "\n");
6661     DILocalVariable *Variable = DI.getVariable();
6662     DIExpression *Expression = DI.getExpression();
6663     dropDanglingDebugInfo(Variable, Expression);
6664     // Assume dbg.declare cannot currently use DIArgList, i.e. it is
6665     // non-variadic.
6666     assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
6667     handleDebugDeclare(DI.getVariableLocationOp(0), Variable, Expression,
6668                        DI.getDebugLoc());
6669     return;
6670   }
6671   case Intrinsic::dbg_label: {
6672     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
6673     DILabel *Label = DI.getLabel();
6674     assert(Label && "Missing label");
6675 
6676     SDDbgLabel *SDV;
6677     SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
6678     DAG.AddDbgLabel(SDV);
6679     return;
6680   }
6681   case Intrinsic::dbg_assign: {
6682     // Debug intrinsics are handled separately in assignment tracking mode.
6683     if (AssignmentTrackingEnabled)
6684       return;
6685     // If assignment tracking hasn't been enabled then fall through and treat
6686     // the dbg.assign as a dbg.value.
6687     [[fallthrough]];
6688   }
6689   case Intrinsic::dbg_value: {
6690     // Debug intrinsics are handled separately in assignment tracking mode.
6691     if (AssignmentTrackingEnabled)
6692       return;
6693     const DbgValueInst &DI = cast<DbgValueInst>(I);
6694     assert(DI.getVariable() && "Missing variable");
6695 
6696     DILocalVariable *Variable = DI.getVariable();
6697     DIExpression *Expression = DI.getExpression();
6698     dropDanglingDebugInfo(Variable, Expression);
6699 
6700     if (DI.isKillLocation()) {
6701       handleKillDebugValue(Variable, Expression, DI.getDebugLoc(), SDNodeOrder);
6702       return;
6703     }
6704 
6705     SmallVector<Value *, 4> Values(DI.getValues());
6706     if (Values.empty())
6707       return;
6708 
6709     bool IsVariadic = DI.hasArgList();
6710     if (!handleDebugValue(Values, Variable, Expression, DI.getDebugLoc(),
6711                           SDNodeOrder, IsVariadic))
6712       addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
6713                            DI.getDebugLoc(), SDNodeOrder);
6714     return;
6715   }
6716 
6717   case Intrinsic::eh_typeid_for: {
6718     // Find the type id for the given typeinfo.
6719     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6720     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6721     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6722     setValue(&I, Res);
6723     return;
6724   }
6725 
6726   case Intrinsic::eh_return_i32:
6727   case Intrinsic::eh_return_i64:
6728     DAG.getMachineFunction().setCallsEHReturn(true);
6729     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6730                             MVT::Other,
6731                             getControlRoot(),
6732                             getValue(I.getArgOperand(0)),
6733                             getValue(I.getArgOperand(1))));
6734     return;
6735   case Intrinsic::eh_unwind_init:
6736     DAG.getMachineFunction().setCallsUnwindInit(true);
6737     return;
6738   case Intrinsic::eh_dwarf_cfa:
6739     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6740                              TLI.getPointerTy(DAG.getDataLayout()),
6741                              getValue(I.getArgOperand(0))));
6742     return;
6743   case Intrinsic::eh_sjlj_callsite: {
6744     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
6745     assert(FuncInfo.getCurrentCallSite() == 0 && "Overlapping call sites!");
6746 
6747     FuncInfo.setCurrentCallSite(CI->getZExtValue());
6748     return;
6749   }
6750   case Intrinsic::eh_sjlj_functioncontext: {
6751     // Get and store the index of the function context.
6752     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6753     AllocaInst *FnCtx =
6754       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6755     int FI = FuncInfo.StaticAllocaMap[FnCtx];
6756     MFI.setFunctionContextIndex(FI);
6757     return;
6758   }
6759   case Intrinsic::eh_sjlj_setjmp: {
6760     SDValue Ops[2];
6761     Ops[0] = getRoot();
6762     Ops[1] = getValue(I.getArgOperand(0));
6763     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6764                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
6765     setValue(&I, Op.getValue(0));
6766     DAG.setRoot(Op.getValue(1));
6767     return;
6768   }
6769   case Intrinsic::eh_sjlj_longjmp:
6770     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6771                             getRoot(), getValue(I.getArgOperand(0))));
6772     return;
6773   case Intrinsic::eh_sjlj_setup_dispatch:
6774     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6775                             getRoot()));
6776     return;
6777   case Intrinsic::masked_gather:
6778     visitMaskedGather(I);
6779     return;
6780   case Intrinsic::masked_load:
6781     visitMaskedLoad(I);
6782     return;
6783   case Intrinsic::masked_scatter:
6784     visitMaskedScatter(I);
6785     return;
6786   case Intrinsic::masked_store:
6787     visitMaskedStore(I);
6788     return;
6789   case Intrinsic::masked_expandload:
6790     visitMaskedLoad(I, true /* IsExpanding */);
6791     return;
6792   case Intrinsic::masked_compressstore:
6793     visitMaskedStore(I, true /* IsCompressing */);
6794     return;
6795   case Intrinsic::powi:
6796     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6797                             getValue(I.getArgOperand(1)), DAG));
6798     return;
6799   case Intrinsic::log:
6800     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6801     return;
6802   case Intrinsic::log2:
6803     setValue(&I,
6804              expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6805     return;
6806   case Intrinsic::log10:
6807     setValue(&I,
6808              expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6809     return;
6810   case Intrinsic::exp:
6811     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6812     return;
6813   case Intrinsic::exp2:
6814     setValue(&I,
6815              expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6816     return;
6817   case Intrinsic::pow:
6818     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6819                            getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6820     return;
6821   case Intrinsic::sqrt:
6822   case Intrinsic::fabs:
6823   case Intrinsic::sin:
6824   case Intrinsic::cos:
6825   case Intrinsic::tan:
6826   case Intrinsic::asin:
6827   case Intrinsic::acos:
6828   case Intrinsic::atan:
6829   case Intrinsic::sinh:
6830   case Intrinsic::cosh:
6831   case Intrinsic::tanh:
6832   case Intrinsic::exp10:
6833   case Intrinsic::floor:
6834   case Intrinsic::ceil:
6835   case Intrinsic::trunc:
6836   case Intrinsic::rint:
6837   case Intrinsic::nearbyint:
6838   case Intrinsic::round:
6839   case Intrinsic::roundeven:
6840   case Intrinsic::canonicalize: {
6841     unsigned Opcode;
6842     // clang-format off
6843     switch (Intrinsic) {
6844     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6845     case Intrinsic::sqrt:         Opcode = ISD::FSQRT;         break;
6846     case Intrinsic::fabs:         Opcode = ISD::FABS;          break;
6847     case Intrinsic::sin:          Opcode = ISD::FSIN;          break;
6848     case Intrinsic::cos:          Opcode = ISD::FCOS;          break;
6849     case Intrinsic::tan:          Opcode = ISD::FTAN;          break;
6850     case Intrinsic::asin:         Opcode = ISD::FASIN;         break;
6851     case Intrinsic::acos:         Opcode = ISD::FACOS;         break;
6852     case Intrinsic::atan:         Opcode = ISD::FATAN;         break;
6853     case Intrinsic::sinh:         Opcode = ISD::FSINH;         break;
6854     case Intrinsic::cosh:         Opcode = ISD::FCOSH;         break;
6855     case Intrinsic::tanh:         Opcode = ISD::FTANH;         break;
6856     case Intrinsic::exp10:        Opcode = ISD::FEXP10;        break;
6857     case Intrinsic::floor:        Opcode = ISD::FFLOOR;        break;
6858     case Intrinsic::ceil:         Opcode = ISD::FCEIL;         break;
6859     case Intrinsic::trunc:        Opcode = ISD::FTRUNC;        break;
6860     case Intrinsic::rint:         Opcode = ISD::FRINT;         break;
6861     case Intrinsic::nearbyint:    Opcode = ISD::FNEARBYINT;    break;
6862     case Intrinsic::round:        Opcode = ISD::FROUND;        break;
6863     case Intrinsic::roundeven:    Opcode = ISD::FROUNDEVEN;    break;
6864     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6865     }
6866     // clang-format on
6867 
6868     setValue(&I, DAG.getNode(Opcode, sdl,
6869                              getValue(I.getArgOperand(0)).getValueType(),
6870                              getValue(I.getArgOperand(0)), Flags));
6871     return;
6872   }
6873   case Intrinsic::atan2:
6874     setValue(&I, DAG.getNode(ISD::FATAN2, sdl,
6875                              getValue(I.getArgOperand(0)).getValueType(),
6876                              getValue(I.getArgOperand(0)),
6877                              getValue(I.getArgOperand(1)), Flags));
6878     return;
6879   case Intrinsic::lround:
6880   case Intrinsic::llround:
6881   case Intrinsic::lrint:
6882   case Intrinsic::llrint: {
6883     unsigned Opcode;
6884     // clang-format off
6885     switch (Intrinsic) {
6886     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6887     case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
6888     case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6889     case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
6890     case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
6891     }
6892     // clang-format on
6893 
6894     EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6895     setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6896                              getValue(I.getArgOperand(0))));
6897     return;
6898   }
6899   case Intrinsic::minnum:
6900     setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6901                              getValue(I.getArgOperand(0)).getValueType(),
6902                              getValue(I.getArgOperand(0)),
6903                              getValue(I.getArgOperand(1)), Flags));
6904     return;
6905   case Intrinsic::maxnum:
6906     setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6907                              getValue(I.getArgOperand(0)).getValueType(),
6908                              getValue(I.getArgOperand(0)),
6909                              getValue(I.getArgOperand(1)), Flags));
6910     return;
6911   case Intrinsic::minimum:
6912     setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6913                              getValue(I.getArgOperand(0)).getValueType(),
6914                              getValue(I.getArgOperand(0)),
6915                              getValue(I.getArgOperand(1)), Flags));
6916     return;
6917   case Intrinsic::maximum:
6918     setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6919                              getValue(I.getArgOperand(0)).getValueType(),
6920                              getValue(I.getArgOperand(0)),
6921                              getValue(I.getArgOperand(1)), Flags));
6922     return;
6923   case Intrinsic::minimumnum:
6924     setValue(&I, DAG.getNode(ISD::FMINIMUMNUM, sdl,
6925                              getValue(I.getArgOperand(0)).getValueType(),
6926                              getValue(I.getArgOperand(0)),
6927                              getValue(I.getArgOperand(1)), Flags));
6928     return;
6929   case Intrinsic::maximumnum:
6930     setValue(&I, DAG.getNode(ISD::FMAXIMUMNUM, sdl,
6931                              getValue(I.getArgOperand(0)).getValueType(),
6932                              getValue(I.getArgOperand(0)),
6933                              getValue(I.getArgOperand(1)), Flags));
6934     return;
6935   case Intrinsic::copysign:
6936     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6937                              getValue(I.getArgOperand(0)).getValueType(),
6938                              getValue(I.getArgOperand(0)),
6939                              getValue(I.getArgOperand(1)), Flags));
6940     return;
6941   case Intrinsic::ldexp:
6942     setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
6943                              getValue(I.getArgOperand(0)).getValueType(),
6944                              getValue(I.getArgOperand(0)),
6945                              getValue(I.getArgOperand(1)), Flags));
6946     return;
6947   case Intrinsic::sincos:
6948   case Intrinsic::frexp: {
6949     unsigned Opcode;
6950     switch (Intrinsic) {
6951     default:
6952       llvm_unreachable("unexpected intrinsic");
6953     case Intrinsic::sincos:
6954       Opcode = ISD::FSINCOS;
6955       break;
6956     case Intrinsic::frexp:
6957       Opcode = ISD::FFREXP;
6958       break;
6959     }
6960     SmallVector<EVT, 2> ValueVTs;
6961     ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
6962     SDVTList VTs = DAG.getVTList(ValueVTs);
6963     setValue(
6964         &I, DAG.getNode(Opcode, sdl, VTs, getValue(I.getArgOperand(0)), Flags));
6965     return;
6966   }
6967   case Intrinsic::arithmetic_fence: {
6968     setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
6969                              getValue(I.getArgOperand(0)).getValueType(),
6970                              getValue(I.getArgOperand(0)), Flags));
6971     return;
6972   }
6973   case Intrinsic::fma:
6974     setValue(&I, DAG.getNode(
6975                      ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
6976                      getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
6977                      getValue(I.getArgOperand(2)), Flags));
6978     return;
6979 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
6980   case Intrinsic::INTRINSIC:
6981 #include "llvm/IR/ConstrainedOps.def"
6982     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6983     return;
6984 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6985 #include "llvm/IR/VPIntrinsics.def"
6986     visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
6987     return;
6988   case Intrinsic::fptrunc_round: {
6989     // The last argument is metadata naming the rounding mode; convert it
6990     // to the corresponding RoundingMode value.
6991     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
6992     std::optional<RoundingMode> RoundMode =
6993         convertStrToRoundingMode(cast<MDString>(MD)->getString());
6994 
6995     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6996 
6997     // Propagate fast-math-flags from IR to node(s).
6998     SDNodeFlags Flags;
6999     Flags.copyFMF(*cast<FPMathOperator>(&I));
7000     SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
7001 
7002     SDValue Result;
7003     Result = DAG.getNode(
7004         ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
7005         DAG.getTargetConstant((int)*RoundMode, sdl, MVT::i32));
7006     setValue(&I, Result);
7007 
7008     return;
7009   }
7010   case Intrinsic::fmuladd: {
7011     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7012     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
7013         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
7014       setValue(&I, DAG.getNode(ISD::FMA, sdl,
7015                                getValue(I.getArgOperand(0)).getValueType(),
7016                                getValue(I.getArgOperand(0)),
7017                                getValue(I.getArgOperand(1)),
7018                                getValue(I.getArgOperand(2)), Flags));
7019     } else {
7020       // TODO: Intrinsic calls should have fast-math-flags.
7021       SDValue Mul = DAG.getNode(
7022           ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
7023           getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
7024       SDValue Add = DAG.getNode(ISD::FADD, sdl,
7025                                 getValue(I.getArgOperand(0)).getValueType(),
7026                                 Mul, getValue(I.getArgOperand(2)), Flags);
7027       setValue(&I, Add);
7028     }
7029     return;
7030   }
7031   case Intrinsic::convert_to_fp16:
7032     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
7033                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
7034                                          getValue(I.getArgOperand(0)),
7035                                          DAG.getTargetConstant(0, sdl,
7036                                                                MVT::i32))));
7037     return;
7038   case Intrinsic::convert_from_fp16:
7039     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
7040                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
7041                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
7042                                          getValue(I.getArgOperand(0)))));
7043     return;
7044   case Intrinsic::fptosi_sat: {
7045     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7046     setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
7047                              getValue(I.getArgOperand(0)),
7048                              DAG.getValueType(VT.getScalarType())));
7049     return;
7050   }
7051   case Intrinsic::fptoui_sat: {
7052     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7053     setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
7054                              getValue(I.getArgOperand(0)),
7055                              DAG.getValueType(VT.getScalarType())));
7056     return;
7057   }
7058   case Intrinsic::set_rounding:
7059     Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
7060                       {getRoot(), getValue(I.getArgOperand(0))});
7061     setValue(&I, Res);
7062     DAG.setRoot(Res.getValue(0));
7063     return;
7064   case Intrinsic::is_fpclass: {
7065     const DataLayout &DLayout = DAG.getDataLayout();
7066     EVT DestVT = TLI.getValueType(DLayout, I.getType());
7067     EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
7068     FPClassTest Test = static_cast<FPClassTest>(
7069         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
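         // E.g. a Test mask of (fcNan | fcInf) checks whether the operand is a
         // NaN or an infinity of either sign.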
7070     MachineFunction &MF = DAG.getMachineFunction();
7071     const Function &F = MF.getFunction();
7072     SDValue Op = getValue(I.getArgOperand(0));
7073     SDNodeFlags Flags;
7074     Flags.setNoFPExcept(
7075         !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7076     // If ISD::IS_FPCLASS should be expanded, do it right now, because the
7077     // expansion can use illegal types. Expanding early lets those types be
7078     // legalized before selection.
7079     if (!TLI.isOperationLegal(ISD::IS_FPCLASS, ArgVT) &&
7080         !TLI.isOperationCustom(ISD::IS_FPCLASS, ArgVT)) {
7081       SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
7082       setValue(&I, Result);
7083       return;
7084     }
7085 
7086     SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
7087     SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
7088     setValue(&I, V);
7089     return;
7090   }
7091   case Intrinsic::get_fpenv: {
7092     const DataLayout &DLayout = DAG.getDataLayout();
7093     EVT EnvVT = TLI.getValueType(DLayout, I.getType());
7094     Align TempAlign = DAG.getEVTAlign(EnvVT);
7095     SDValue Chain = getRoot();
7096     // Use GET_FPENV if it is legal or custom. Otherwise fall back to the
7097     // memory-based GET_FPENV_MEM node with temporary stack storage.
7098     if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
7099       Res = DAG.getNode(
7100           ISD::GET_FPENV, sdl,
7101           DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7102                         MVT::Other),
7103           Chain);
7104     } else {
7105       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
7106       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7107       auto MPI =
7108           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
7109       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7110           MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
7111           TempAlign);
7112       Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7113       Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7114     }
7115     setValue(&I, Res);
7116     DAG.setRoot(Res.getValue(1));
7117     return;
7118   }
7119   case Intrinsic::set_fpenv: {
7120     const DataLayout &DLayout = DAG.getDataLayout();
7121     SDValue Env = getValue(I.getArgOperand(0));
7122     EVT EnvVT = Env.getValueType();
7123     Align TempAlign = DAG.getEVTAlign(EnvVT);
7124     SDValue Chain = getRoot();
7125     // If SET_FPENV is legal or custom, use it. Otherwise load the
7126     // environment from a stack slot via SET_FPENV_MEM.
7127     if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
7128       Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
7129     } else {
7130       // Allocate a stack slot, copy the environment bits into it, and hand
7131       // that memory to SET_FPENV_MEM.
7132       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
7133       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7134       auto MPI =
7135           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
7136       Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7137                            MachineMemOperand::MOStore);
7138       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7139           MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
7140           TempAlign);
7141       Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7142     }
7143     DAG.setRoot(Chain);
7144     return;
7145   }
7146   case Intrinsic::reset_fpenv:
7147     DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
7148     return;
7149   case Intrinsic::get_fpmode:
7150     Res = DAG.getNode(
7151         ISD::GET_FPMODE, sdl,
7152         DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7153                       MVT::Other),
7154         DAG.getRoot());
7155     setValue(&I, Res);
7156     DAG.setRoot(Res.getValue(1));
7157     return;
7158   case Intrinsic::set_fpmode:
7159     Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
7160                       getValue(I.getArgOperand(0)));
7161     DAG.setRoot(Res);
7162     return;
7163   case Intrinsic::reset_fpmode: {
7164     Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
7165     DAG.setRoot(Res);
7166     return;
7167   }
7168   case Intrinsic::pcmarker: {
7169     SDValue Tmp = getValue(I.getArgOperand(0));
7170     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
7171     return;
7172   }
7173   case Intrinsic::readcyclecounter: {
7174     SDValue Op = getRoot();
7175     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
7176                       DAG.getVTList(MVT::i64, MVT::Other), Op);
7177     setValue(&I, Res);
7178     DAG.setRoot(Res.getValue(1));
7179     return;
7180   }
7181   case Intrinsic::readsteadycounter: {
7182     SDValue Op = getRoot();
7183     Res = DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
7184                       DAG.getVTList(MVT::i64, MVT::Other), Op);
7185     setValue(&I, Res);
7186     DAG.setRoot(Res.getValue(1));
7187     return;
7188   }
7189   case Intrinsic::bitreverse:
7190     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
7191                              getValue(I.getArgOperand(0)).getValueType(),
7192                              getValue(I.getArgOperand(0))));
7193     return;
7194   case Intrinsic::bswap:
7195     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
7196                              getValue(I.getArgOperand(0)).getValueType(),
7197                              getValue(I.getArgOperand(0))));
7198     return;
7199   case Intrinsic::cttz: {
7200     SDValue Arg = getValue(I.getArgOperand(0));
7201     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
7202     EVT Ty = Arg.getValueType();
7203     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
7204                              sdl, Ty, Arg));
7205     return;
7206   }
7207   case Intrinsic::ctlz: {
7208     SDValue Arg = getValue(I.getArgOperand(0));
7209     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
7210     EVT Ty = Arg.getValueType();
7211     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
7212                              sdl, Ty, Arg));
7213     return;
7214   }
7215   case Intrinsic::ctpop: {
7216     SDValue Arg = getValue(I.getArgOperand(0));
7217     EVT Ty = Arg.getValueType();
7218     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
7219     return;
7220   }
7221   case Intrinsic::fshl:
7222   case Intrinsic::fshr: {
7223     bool IsFSHL = Intrinsic == Intrinsic::fshl;
7224     SDValue X = getValue(I.getArgOperand(0));
7225     SDValue Y = getValue(I.getArgOperand(1));
7226     SDValue Z = getValue(I.getArgOperand(2));
7227     EVT VT = X.getValueType();
7228 
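         // When both value operands are the same, the funnel shift degenerates
         // to a rotate: fshl(x, x, z) == rotl(x, z), fshr(x, x, z) == rotr(x, z).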
7229     if (X == Y) {
7230       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
7231       setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
7232     } else {
7233       auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
7234       setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
7235     }
7236     return;
7237   }
7238   case Intrinsic::sadd_sat: {
7239     SDValue Op1 = getValue(I.getArgOperand(0));
7240     SDValue Op2 = getValue(I.getArgOperand(1));
7241     setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
7242     return;
7243   }
7244   case Intrinsic::uadd_sat: {
7245     SDValue Op1 = getValue(I.getArgOperand(0));
7246     SDValue Op2 = getValue(I.getArgOperand(1));
7247     setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
7248     return;
7249   }
7250   case Intrinsic::ssub_sat: {
7251     SDValue Op1 = getValue(I.getArgOperand(0));
7252     SDValue Op2 = getValue(I.getArgOperand(1));
7253     setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
7254     return;
7255   }
7256   case Intrinsic::usub_sat: {
7257     SDValue Op1 = getValue(I.getArgOperand(0));
7258     SDValue Op2 = getValue(I.getArgOperand(1));
7259     setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
7260     return;
7261   }
7262   case Intrinsic::sshl_sat: {
7263     SDValue Op1 = getValue(I.getArgOperand(0));
7264     SDValue Op2 = getValue(I.getArgOperand(1));
7265     setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
7266     return;
7267   }
7268   case Intrinsic::ushl_sat: {
7269     SDValue Op1 = getValue(I.getArgOperand(0));
7270     SDValue Op2 = getValue(I.getArgOperand(1));
7271     setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
7272     return;
7273   }
7274   case Intrinsic::smul_fix:
7275   case Intrinsic::umul_fix:
7276   case Intrinsic::smul_fix_sat:
7277   case Intrinsic::umul_fix_sat: {
7278     SDValue Op1 = getValue(I.getArgOperand(0));
7279     SDValue Op2 = getValue(I.getArgOperand(1));
7280     SDValue Op3 = getValue(I.getArgOperand(2));
7281     setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
7282                              Op1.getValueType(), Op1, Op2, Op3));
7283     return;
7284   }
7285   case Intrinsic::sdiv_fix:
7286   case Intrinsic::udiv_fix:
7287   case Intrinsic::sdiv_fix_sat:
7288   case Intrinsic::udiv_fix_sat: {
7289     SDValue Op1 = getValue(I.getArgOperand(0));
7290     SDValue Op2 = getValue(I.getArgOperand(1));
7291     SDValue Op3 = getValue(I.getArgOperand(2));
7292     setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
7293                               Op1, Op2, Op3, DAG, TLI));
7294     return;
7295   }
7296   case Intrinsic::smax: {
7297     SDValue Op1 = getValue(I.getArgOperand(0));
7298     SDValue Op2 = getValue(I.getArgOperand(1));
7299     setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
7300     return;
7301   }
7302   case Intrinsic::smin: {
7303     SDValue Op1 = getValue(I.getArgOperand(0));
7304     SDValue Op2 = getValue(I.getArgOperand(1));
7305     setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
7306     return;
7307   }
7308   case Intrinsic::umax: {
7309     SDValue Op1 = getValue(I.getArgOperand(0));
7310     SDValue Op2 = getValue(I.getArgOperand(1));
7311     setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
7312     return;
7313   }
7314   case Intrinsic::umin: {
7315     SDValue Op1 = getValue(I.getArgOperand(0));
7316     SDValue Op2 = getValue(I.getArgOperand(1));
7317     setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
7318     return;
7319   }
7320   case Intrinsic::abs: {
7321     // TODO: Preserve "int min is poison" arg in SDAG?
7322     SDValue Op1 = getValue(I.getArgOperand(0));
7323     setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
7324     return;
7325   }
7326   case Intrinsic::scmp: {
7327     SDValue Op1 = getValue(I.getArgOperand(0));
7328     SDValue Op2 = getValue(I.getArgOperand(1));
7329     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7330     setValue(&I, DAG.getNode(ISD::SCMP, sdl, DestVT, Op1, Op2));
7331     break;
7332   }
7333   case Intrinsic::ucmp: {
7334     SDValue Op1 = getValue(I.getArgOperand(0));
7335     SDValue Op2 = getValue(I.getArgOperand(1));
7336     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7337     setValue(&I, DAG.getNode(ISD::UCMP, sdl, DestVT, Op1, Op2));
7338     break;
7339   }
7340   case Intrinsic::stacksave: {
7341     SDValue Op = getRoot();
7342     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7343     Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
7344     setValue(&I, Res);
7345     DAG.setRoot(Res.getValue(1));
7346     return;
7347   }
7348   case Intrinsic::stackrestore:
7349     Res = getValue(I.getArgOperand(0));
7350     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
7351     return;
7352   case Intrinsic::get_dynamic_area_offset: {
7353     SDValue Op = getRoot();
7354     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
7355     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7356     // The result type of @llvm.get.dynamic.area.offset must not be wider
7357     // than the target's pointer type.
7358     if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
7359       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
7360                          " intrinsic!");
7361     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
7362                       Op);
7363     DAG.setRoot(Op);
7364     setValue(&I, Res);
7365     return;
7366   }
7367   case Intrinsic::stackguard: {
7368     MachineFunction &MF = DAG.getMachineFunction();
7369     const Module &M = *MF.getFunction().getParent();
7370     EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7371     SDValue Chain = getRoot();
7372     if (TLI.useLoadStackGuardNode(M)) {
7373       Res = getLoadStackGuard(DAG, sdl, Chain);
7374       Res = DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7375     } else {
7376       const Value *Global = TLI.getSDagStackGuard(M);
7377       Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
7378       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
7379                         MachinePointerInfo(Global, 0), Align,
7380                         MachineMemOperand::MOVolatile);
7381     }
7382     if (TLI.useStackGuardXorFP())
7383       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
7384     DAG.setRoot(Chain);
7385     setValue(&I, Res);
7386     return;
7387   }
7388   case Intrinsic::stackprotector: {
7389     // Emit code into the DAG to store the stack guard onto the stack.
7390     MachineFunction &MF = DAG.getMachineFunction();
7391     MachineFrameInfo &MFI = MF.getFrameInfo();
7392     const Module &M = *MF.getFunction().getParent();
7393     SDValue Src, Chain = getRoot();
7394 
7395     if (TLI.useLoadStackGuardNode(M))
7396       Src = getLoadStackGuard(DAG, sdl, Chain);
7397     else
7398       Src = getValue(I.getArgOperand(0));   // The guard's value.
7399 
7400     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
7401 
7402     int FI = FuncInfo.StaticAllocaMap[Slot];
7403     MFI.setStackProtectorIndex(FI);
7404     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
7405 
7406     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
7407 
7408     // Store the stack protector onto the stack.
7409     Res = DAG.getStore(
7410         Chain, sdl, Src, FIN,
7411         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
7412         MaybeAlign(), MachineMemOperand::MOVolatile);
7413     setValue(&I, Res);
7414     DAG.setRoot(Res);
7415     return;
7416   }
7417   case Intrinsic::objectsize:
7418     llvm_unreachable("llvm.objectsize.* should have been lowered already");
7419 
7420   case Intrinsic::is_constant:
7421     llvm_unreachable("llvm.is.constant.* should have been lowered already");
7422 
7423   case Intrinsic::annotation:
7424   case Intrinsic::ptr_annotation:
7425   case Intrinsic::launder_invariant_group:
7426   case Intrinsic::strip_invariant_group:
7427     // Drop the intrinsic, but forward the value.
7428     setValue(&I, getValue(I.getOperand(0)));
7429     return;
7430 
7431   case Intrinsic::assume:
7432   case Intrinsic::experimental_noalias_scope_decl:
7433   case Intrinsic::var_annotation:
7434   case Intrinsic::sideeffect:
7435     // Discard annotate attributes, noalias scope declarations, assumptions, and
7436     // artificial side-effects.
7437     return;
7438 
7439   case Intrinsic::codeview_annotation: {
7440     // Emit a label associated with this metadata.
7441     MachineFunction &MF = DAG.getMachineFunction();
7442     MCSymbol *Label = MF.getContext().createTempSymbol("annotation", true);
7443     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
7444     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
7445     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
7446     DAG.setRoot(Res);
7447     return;
7448   }
7449 
7450   case Intrinsic::init_trampoline: {
7451     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
7452 
7453     SDValue Ops[6];
7454     Ops[0] = getRoot();
7455     Ops[1] = getValue(I.getArgOperand(0));
7456     Ops[2] = getValue(I.getArgOperand(1));
7457     Ops[3] = getValue(I.getArgOperand(2));
7458     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
7459     Ops[5] = DAG.getSrcValue(F);
7460 
7461     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
7462 
7463     DAG.setRoot(Res);
7464     return;
7465   }
7466   case Intrinsic::adjust_trampoline:
7467     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
7468                              TLI.getPointerTy(DAG.getDataLayout()),
7469                              getValue(I.getArgOperand(0))));
7470     return;
7471   case Intrinsic::gcroot: {
7472     assert(DAG.getMachineFunction().getFunction().hasGC() &&
7473            "only valid in functions with gc specified, enforced by Verifier");
7474     assert(GFI && "implied by previous");
7475     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
7476     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
7477 
7478     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
7479     GFI->addStackRoot(FI->getIndex(), TypeMap);
7480     return;
7481   }
7482   case Intrinsic::gcread:
7483   case Intrinsic::gcwrite:
7484     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
7485   case Intrinsic::get_rounding:
7486     Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
7487     setValue(&I, Res);
7488     DAG.setRoot(Res.getValue(1));
7489     return;
7490 
7491   case Intrinsic::expect:
7492     // Just replace __builtin_expect(exp, c) with EXP.
7493     setValue(&I, getValue(I.getArgOperand(0)));
7494     return;
7495 
7496   case Intrinsic::ubsantrap:
7497   case Intrinsic::debugtrap:
7498   case Intrinsic::trap: {
7499     StringRef TrapFuncName =
7500         I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
7501     if (TrapFuncName.empty()) {
7502       switch (Intrinsic) {
7503       case Intrinsic::trap:
7504         DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
7505         break;
7506       case Intrinsic::debugtrap:
7507         DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
7508         break;
7509       case Intrinsic::ubsantrap:
7510         DAG.setRoot(DAG.getNode(
7511             ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
7512             DAG.getTargetConstant(
7513                 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7514                 MVT::i32)));
7515         break;
7516       default: llvm_unreachable("unknown trap intrinsic");
7517       }
7518       DAG.addNoMergeSiteInfo(DAG.getRoot().getNode(),
7519                              I.hasFnAttr(Attribute::NoMerge));
7520       return;
7521     }
7522     TargetLowering::ArgListTy Args;
7523     if (Intrinsic == Intrinsic::ubsantrap) {
7524       Args.push_back(TargetLoweringBase::ArgListEntry());
7525       Args[0].Val = I.getArgOperand(0);
7526       Args[0].Node = getValue(Args[0].Val);
7527       Args[0].Ty = Args[0].Val->getType();
7528     }
7529 
7530     TargetLowering::CallLoweringInfo CLI(DAG);
7531     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
7532         CallingConv::C, I.getType(),
7533         DAG.getExternalSymbol(TrapFuncName.data(),
7534                               TLI.getPointerTy(DAG.getDataLayout())),
7535         std::move(Args));
7536     CLI.NoMerge = I.hasFnAttr(Attribute::NoMerge);
7537     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7538     DAG.setRoot(Result.second);
7539     return;
7540   }
7541 
7542   case Intrinsic::allow_runtime_check:
7543   case Intrinsic::allow_ubsan_check:
7544     setValue(&I, getValue(ConstantInt::getTrue(I.getType())));
7545     return;
7546 
7547   case Intrinsic::uadd_with_overflow:
7548   case Intrinsic::sadd_with_overflow:
7549   case Intrinsic::usub_with_overflow:
7550   case Intrinsic::ssub_with_overflow:
7551   case Intrinsic::umul_with_overflow:
7552   case Intrinsic::smul_with_overflow: {
7553     ISD::NodeType Op;
7554     switch (Intrinsic) {
7555     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7556     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
7557     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
7558     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
7559     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
7560     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
7561     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
7562     }
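    // Each of these intrinsics returns a {result, overflow} pair; a sketch of
    // the IR form being lowered:
    //   %pair = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
    // which maps onto the single two-result ISD node built below.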
7563     SDValue Op1 = getValue(I.getArgOperand(0));
7564     SDValue Op2 = getValue(I.getArgOperand(1));
7565 
7566     EVT ResultVT = Op1.getValueType();
7567     EVT OverflowVT = MVT::i1;
7568     if (ResultVT.isVector())
7569       OverflowVT = EVT::getVectorVT(
7570           *Context, OverflowVT, ResultVT.getVectorElementCount());
7571 
7572     SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
7573     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
7574     return;
7575   }
7576   case Intrinsic::prefetch: {
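    // A sketch of the intrinsic's IR form for reference:
    //   declare void @llvm.prefetch.p0(ptr %addr, i32 %rw, i32 %locality,
    //                                  i32 %cachetype)
    // rw is 0 for a read and 1 for a write, hence the MOLoad/MOStore choice
    // below.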
7577     SDValue Ops[5];
7578     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7579     auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
7580     Ops[0] = DAG.getRoot();
7581     Ops[1] = getValue(I.getArgOperand(0));
7582     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
7583                                    MVT::i32);
7584     Ops[3] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(2)), sdl,
7585                                    MVT::i32);
7586     Ops[4] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(3)), sdl,
7587                                    MVT::i32);
7588     SDValue Result = DAG.getMemIntrinsicNode(
7589         ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
7590         EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
7591         /* align */ std::nullopt, Flags);
7592 
7593     // Chain the prefetch in parallel with any pending loads, to stay out of
7594     // the way of later optimizations.
7595     PendingLoads.push_back(Result);
7596     Result = getRoot();
7597     DAG.setRoot(Result);
7598     return;
7599   }
7600   case Intrinsic::lifetime_start:
7601   case Intrinsic::lifetime_end: {
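    // A sketch of the IR form being lowered:
    //   call void @llvm.lifetime.start.p0(i64 <size>, ptr <ptr>)
    // with the size in operand 0 and the pointer in operand 1, as read below.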
7602     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
7603     // Stack coloring is not enabled at -O0, so discard the region information.
7604     if (TM.getOptLevel() == CodeGenOptLevel::None)
7605       return;
7606 
7607     const int64_t ObjectSize =
7608         cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
7609     Value *const ObjectPtr = I.getArgOperand(1);
7610     SmallVector<const Value *, 4> Allocas;
7611     getUnderlyingObjects(ObjectPtr, Allocas);
7612 
7613     for (const Value *Alloca : Allocas) {
7614       const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7615 
7616       // Could not find an Alloca.
7617       if (!LifetimeObject)
7618         continue;
7619 
7620       // First check that the Alloca is static; otherwise it won't have a
7621       // valid frame index.
7622       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
7623       if (SI == FuncInfo.StaticAllocaMap.end())
7624         return;
7625 
7626       const int FrameIndex = SI->second;
7627       int64_t Offset;
7628       if (GetPointerBaseWithConstantOffset(
7629               ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
7630         Offset = -1; // Cannot determine offset from alloca to lifetime object.
7631       Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
7632                                 Offset);
7633       DAG.setRoot(Res);
7634     }
7635     return;
7636   }
7637   case Intrinsic::pseudoprobe: {
7638     auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7639     auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7640     auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7641     Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
7642     DAG.setRoot(Res);
7643     return;
7644   }
7645   case Intrinsic::invariant_start:
7646     // Discard region information.
7647     setValue(&I,
7648              DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType())));
7649     return;
7650   case Intrinsic::invariant_end:
7651     // Discard region information.
7652     return;
7653   case Intrinsic::clear_cache: {
7654     SDValue InputChain = DAG.getRoot();
7655     SDValue StartVal = getValue(I.getArgOperand(0));
7656     SDValue EndVal = getValue(I.getArgOperand(1));
7657     Res = DAG.getNode(ISD::CLEAR_CACHE, sdl, DAG.getVTList(MVT::Other),
7658                       {InputChain, StartVal, EndVal});
7659     setValue(&I, Res);
7660     DAG.setRoot(Res);
7661     return;
7662   }
7663   case Intrinsic::donothing:
7664   case Intrinsic::seh_try_begin:
7665   case Intrinsic::seh_scope_begin:
7666   case Intrinsic::seh_try_end:
7667   case Intrinsic::seh_scope_end:
7668     // ignore
7669     return;
7670   case Intrinsic::experimental_stackmap:
7671     visitStackmap(I);
7672     return;
7673   case Intrinsic::experimental_patchpoint_void:
7674   case Intrinsic::experimental_patchpoint:
7675     visitPatchpoint(I);
7676     return;
7677   case Intrinsic::experimental_gc_statepoint:
7678     LowerStatepoint(cast<GCStatepointInst>(I));
7679     return;
7680   case Intrinsic::experimental_gc_result:
7681     visitGCResult(cast<GCResultInst>(I));
7682     return;
7683   case Intrinsic::experimental_gc_relocate:
7684     visitGCRelocate(cast<GCRelocateInst>(I));
7685     return;
7686   case Intrinsic::instrprof_cover:
7687     llvm_unreachable("instrprof failed to lower a cover");
7688   case Intrinsic::instrprof_increment:
7689     llvm_unreachable("instrprof failed to lower an increment");
7690   case Intrinsic::instrprof_timestamp:
7691     llvm_unreachable("instrprof failed to lower a timestamp");
7692   case Intrinsic::instrprof_value_profile:
7693     llvm_unreachable("instrprof failed to lower a value profiling call");
7694   case Intrinsic::instrprof_mcdc_parameters:
7695     llvm_unreachable("instrprof failed to lower mcdc parameters");
7696   case Intrinsic::instrprof_mcdc_tvbitmap_update:
7697     llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update");
7698   case Intrinsic::localescape: {
7699     MachineFunction &MF = DAG.getMachineFunction();
7700     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
7701 
7702     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
7703     // is the same on all targets.
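    // A sketch of the IR form (each operand names a static alloca to escape):
    //   call void (...) @llvm.localescape(ptr %a, ptr %b)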
7704     for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) {
7705       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7706       if (isa<ConstantPointerNull>(Arg))
7707         continue; // Skip null pointers. They represent a hole in index space.
7708       AllocaInst *Slot = cast<AllocaInst>(Arg);
7709       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
7710              "can only escape static allocas");
7711       int FI = FuncInfo.StaticAllocaMap[Slot];
7712       MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol(
7713           GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
7714       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
7715               TII->get(TargetOpcode::LOCAL_ESCAPE))
7716           .addSym(FrameAllocSym)
7717           .addFrameIndex(FI);
7718     }
7719 
7720     return;
7721   }
7722 
7723   case Intrinsic::localrecover: {
7724     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
7725     MachineFunction &MF = DAG.getMachineFunction();
7726 
7727     // Get the symbol that defines the frame offset.
7728     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7729     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
7730     unsigned IdxVal =
7731         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7732     MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol(
7733         GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
7734 
7735     Value *FP = I.getArgOperand(1);
7736     SDValue FPVal = getValue(FP);
7737     EVT PtrVT = FPVal.getValueType();
7738 
7739     // Create an MCSymbol for the label to avoid any target lowering
7740     // that would make this PC relative.
7741     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
7742     SDValue OffsetVal =
7743         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
7744 
7745     // Add the offset to the FP.
7746     SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7747     setValue(&I, Add);
7748 
7749     return;
7750   }
7751 
7752   case Intrinsic::fake_use: {
7753     Value *V = I.getArgOperand(0);
7754     SDValue Ops[2];
7755     // For Values not declared or previously used in this basic block, the
7756     // NodeMap will not have an entry, and `getValue` will assert if V has no
7757     // valid register value.
7758     auto FakeUseValue = [&]() -> SDValue {
7759       SDValue &N = NodeMap[V];
7760       if (N.getNode())
7761         return N;
7762 
7763       // If there's a virtual register allocated and initialized for this
7764       // value, use it.
7765       if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
7766         return copyFromReg;
7767       // FIXME: Do we want to preserve constants? It seems pointless.
7768       if (isa<Constant>(V))
7769         return getValue(V);
7770       return SDValue();
7771     }();
7772     if (!FakeUseValue || FakeUseValue.isUndef())
7773       return;
7774     Ops[0] = getRoot();
7775     Ops[1] = FakeUseValue;
7776     // The undef and empty-SDValue cases were already rejected when computing
7777     // FakeUseValue above, so Ops[1] is known to be a valid, non-undef value
7778     // here; translating the fake use unconditionally is safe and avoids a
7779     // redundant second check.
7780     DAG.setRoot(DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other, Ops));
7781     return;
7782   }
7783 
7784   case Intrinsic::eh_exceptionpointer:
7785   case Intrinsic::eh_exceptioncode: {
7786     // Get the exception pointer vreg, copy from it, and resize it to fit.
7787     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
7788     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
7789     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
7790     Register VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
7791     SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT);
7792     if (Intrinsic == Intrinsic::eh_exceptioncode)
7793       N = DAG.getZExtOrTrunc(N, sdl, MVT::i32);
7794     setValue(&I, N);
7795     return;
7796   }
7797   case Intrinsic::xray_customevent: {
7798     // Here we want to make sure that the intrinsic behaves as if it has a
7799     // specific calling convention.
7800     const auto &Triple = DAG.getTarget().getTargetTriple();
7801     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7802       return;
7803 
7804     SmallVector<SDValue, 8> Ops;
7805 
7806     // We want the arguments to always be passed in registers.
7807     SDValue LogEntryVal = getValue(I.getArgOperand(0));
7808     SDValue StrSizeVal = getValue(I.getArgOperand(1));
7809     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7810     SDValue Chain = getRoot();
7811     Ops.push_back(LogEntryVal);
7812     Ops.push_back(StrSizeVal);
7813     Ops.push_back(Chain);
7814 
7815     // We need to enforce the calling convention for the callsite so that
7816     // argument ordering is enforced correctly, and so that register allocation
7817     // can see that some registers may be clobbered and must be preserved across
7818     // calls to the intrinsic.
7819     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7820                                            sdl, NodeTys, Ops);
7821     SDValue patchableNode = SDValue(MN, 0);
7822     DAG.setRoot(patchableNode);
7823     setValue(&I, patchableNode);
7824     return;
7825   }
7826   case Intrinsic::xray_typedevent: {
7827     // Here we want to make sure that the intrinsic behaves as if it has a
7828     // specific calling convention.
7829     const auto &Triple = DAG.getTarget().getTargetTriple();
7830     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7831       return;
7832 
7833     SmallVector<SDValue, 8> Ops;
7834 
7835     // We want the arguments to always be passed in registers.
7836     // It is unclear how manipulating the selection DAG here forces callers to
7837     // provide arguments in registers instead of on the stack.
7838     SDValue LogTypeId = getValue(I.getArgOperand(0));
7839     SDValue LogEntryVal = getValue(I.getArgOperand(1));
7840     SDValue StrSizeVal = getValue(I.getArgOperand(2));
7841     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7842     SDValue Chain = getRoot();
7843     Ops.push_back(LogTypeId);
7844     Ops.push_back(LogEntryVal);
7845     Ops.push_back(StrSizeVal);
7846     Ops.push_back(Chain);
7847 
7848     // We need to enforce the calling convention for the callsite so that
7849     // argument ordering is enforced correctly, and so that register allocation
7850     // can see that some registers may be clobbered and must be preserved across
7851     // calls to the intrinsic.
7852     MachineSDNode *MN = DAG.getMachineNode(
7853         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7854     SDValue patchableNode = SDValue(MN, 0);
7855     DAG.setRoot(patchableNode);
7856     setValue(&I, patchableNode);
7857     return;
7858   }
7859   case Intrinsic::experimental_deoptimize:
7860     LowerDeoptimizeCall(&I);
7861     return;
7862   case Intrinsic::stepvector:
7863     visitStepVector(I);
7864     return;
7865   case Intrinsic::vector_reduce_fadd:
7866   case Intrinsic::vector_reduce_fmul:
7867   case Intrinsic::vector_reduce_add:
7868   case Intrinsic::vector_reduce_mul:
7869   case Intrinsic::vector_reduce_and:
7870   case Intrinsic::vector_reduce_or:
7871   case Intrinsic::vector_reduce_xor:
7872   case Intrinsic::vector_reduce_smax:
7873   case Intrinsic::vector_reduce_smin:
7874   case Intrinsic::vector_reduce_umax:
7875   case Intrinsic::vector_reduce_umin:
7876   case Intrinsic::vector_reduce_fmax:
7877   case Intrinsic::vector_reduce_fmin:
7878   case Intrinsic::vector_reduce_fmaximum:
7879   case Intrinsic::vector_reduce_fminimum:
7880     visitVectorReduce(I, Intrinsic);
7881     return;
7882 
7883   case Intrinsic::icall_branch_funnel: {
7884     SmallVector<SDValue, 16> Ops;
7885     Ops.push_back(getValue(I.getArgOperand(0)));
7886 
7887     int64_t Offset;
7888     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7889         I.getArgOperand(1), Offset, DAG.getDataLayout()));
7890     if (!Base)
7891       report_fatal_error(
7892           "llvm.icall.branch.funnel operand must be a GlobalValue");
7893     Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0));
7894 
7895     struct BranchFunnelTarget {
7896       int64_t Offset;
7897       SDValue Target;
7898     };
7899     SmallVector<BranchFunnelTarget, 8> Targets;
7900 
7901     for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) {
7902       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7903           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
7904       if (ElemBase != Base)
7905         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
7906                            "to the same GlobalValue");
7907 
7908       SDValue Val = getValue(I.getArgOperand(Op + 1));
7909       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7910       if (!GA)
7911         report_fatal_error(
7912             "llvm.icall.branch.funnel operand must be a GlobalValue");
7913       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
7914                                      GA->getGlobal(), sdl, Val.getValueType(),
7915                                      GA->getOffset())});
7916     }
7917     llvm::sort(Targets,
7918                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
7919                  return T1.Offset < T2.Offset;
7920                });
7921 
7922     for (auto &T : Targets) {
7923       Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32));
7924       Ops.push_back(T.Target);
7925     }
7926 
7927     Ops.push_back(DAG.getRoot()); // Chain
7928     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7929                                  MVT::Other, Ops),
7930               0);
7931     DAG.setRoot(N);
7932     setValue(&I, N);
7933     HasTailCall = true;
7934     return;
7935   }
7936 
7937   case Intrinsic::wasm_landingpad_index:
7938     // The information this intrinsic contained has been transferred to
7939     // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
7940     // delete it now.
7941     return;
7942 
7943   case Intrinsic::aarch64_settag:
7944   case Intrinsic::aarch64_settag_zero: {
7945     const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7946     bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
7947     SDValue Val = TSI.EmitTargetCodeForSetTag(
7948         DAG, sdl, getRoot(), getValue(I.getArgOperand(0)),
7949         getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
7950         ZeroMemory);
7951     DAG.setRoot(Val);
7952     setValue(&I, Val);
7953     return;
7954   }
7955   case Intrinsic::amdgcn_cs_chain: {
7956     assert(I.arg_size() == 5 && "Additional args not supported yet");
7957     assert(cast<ConstantInt>(I.getOperand(4))->isZero() &&
7958            "Non-zero flags not supported yet");
7959 
7960     // At this point we don't care if it's amdgpu_cs_chain or
7961     // amdgpu_cs_chain_preserve.
7962     CallingConv::ID CC = CallingConv::AMDGPU_CS_Chain;
7963 
7964     Type *RetTy = I.getType();
7965     assert(RetTy->isVoidTy() && "Should not return");
7966 
7967     SDValue Callee = getValue(I.getOperand(0));
7968 
7969     // We only have 2 actual args: one for the SGPRs and one for the VGPRs.
7970     // We'll also tack on the value of the EXEC mask at the end.
7971     TargetLowering::ArgListTy Args;
7972     Args.reserve(3);
7973 
7974     for (unsigned Idx : {2, 3, 1}) {
7975       TargetLowering::ArgListEntry Arg;
7976       Arg.Node = getValue(I.getOperand(Idx));
7977       Arg.Ty = I.getOperand(Idx)->getType();
7978       Arg.setAttributes(&I, Idx);
7979       Args.push_back(Arg);
7980     }
7981 
7982     assert(Args[0].IsInReg && "SGPR args should be marked inreg");
7983     assert(!Args[1].IsInReg && "VGPR args should not be marked inreg");
7984     Args[2].IsInReg = true; // EXEC should be inreg
7985 
7986     TargetLowering::CallLoweringInfo CLI(DAG);
7987     CLI.setDebugLoc(getCurSDLoc())
7988         .setChain(getRoot())
7989         .setCallee(CC, RetTy, Callee, std::move(Args))
7990         .setNoReturn(true)
7991         .setTailCall(true)
7992         .setConvergent(I.isConvergent());
7993     CLI.CB = &I;
7994     std::pair<SDValue, SDValue> Result =
7995         lowerInvokable(CLI, /*EHPadBB*/ nullptr);
7996     (void)Result;
7997     assert(!Result.first.getNode() && !Result.second.getNode() &&
7998            "Should've lowered as tail call");
7999 
8000     HasTailCall = true;
8001     return;
8002   }
8003   case Intrinsic::ptrmask: {
8004     SDValue Ptr = getValue(I.getOperand(0));
8005     SDValue Mask = getValue(I.getOperand(1));
8006 
8007     // On arm64_32, pointers are 32 bits when stored in memory, but
8008     // zero-extended to 64 bits when in registers.  Thus the mask is 32 bits to
8009     // match the index type, but the pointer is 64 bits, so the mask must be
8010     // zero-extended up to 64 bits to match the pointer.
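    // A sketch of the IR form, where the mask width matches the index type:
    //   %masked = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 %mask)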
8011     EVT PtrVT =
8012         TLI.getValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
8013     EVT MemVT =
8014         TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
8015     assert(PtrVT == Ptr.getValueType());
8016     assert(MemVT == Mask.getValueType());
8017     if (MemVT != PtrVT)
8018       Mask = DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8019 
8020     setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr, Mask));
8021     return;
8022   }
8023   case Intrinsic::threadlocal_address: {
8024     setValue(&I, getValue(I.getOperand(0)));
8025     return;
8026   }
8027   case Intrinsic::get_active_lane_mask: {
8028     EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8029     SDValue Index = getValue(I.getOperand(0));
8030     EVT ElementVT = Index.getValueType();
8031 
8032     if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) {
8033       visitTargetIntrinsic(I, Intrinsic);
8034       return;
8035     }
8036 
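    // Generic expansion, as built below (a sketch): lane i of the result is
    // (Index + i) u< TripCount, computed with a saturating add so the
    // induction cannot wrap:
    //   %vi   = uadd.sat(splat(Index), stepvector)
    //   %mask = icmp ult %vi, splat(TripCount)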
8037     SDValue TripCount = getValue(I.getOperand(1));
8038     EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT,
8039                                  CCVT.getVectorElementCount());
8040 
8041     SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index);
8042     SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount);
8043     SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
8044     SDValue VectorInduction = DAG.getNode(
8045         ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep);
8046     SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction,
8047                                  VectorTripCount, ISD::CondCode::SETULT);
8048     setValue(&I, SetCC);
8049     return;
8050   }
8051   case Intrinsic::experimental_get_vector_length: {
8052     assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
8053            "Expected positive VF");
8054     unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
8055     bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
8056 
8057     SDValue Count = getValue(I.getOperand(0));
8058     EVT CountVT = Count.getValueType();
8059 
8060     if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) {
8061       visitTargetIntrinsic(I, Intrinsic);
8062       return;
8063     }
8064 
8065     // Expand to a umin between the trip count and the maximum number of
8066     // elements the vector type can hold.
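    // A sketch of the computation built below:
    //   %evl = umin(zext(%cnt), VF [* vscale if scalable])
    // with the result truncated back to the requested type at the end.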
8067     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8068 
8069     // Extend the trip count to at least the result VT.
8070     if (CountVT.bitsLT(VT)) {
8071       Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count);
8072       CountVT = VT;
8073     }
8074 
8075     SDValue MaxEVL = DAG.getElementCount(sdl, CountVT,
8076                                          ElementCount::get(VF, IsScalable));
8077 
8078     SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL);
8079     // Clip to the result type if needed.
8080     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin);
8081 
8082     setValue(&I, Trunc);
8083     return;
8084   }
8085   case Intrinsic::experimental_vector_partial_reduce_add: {
8086 
8087     if (!TLI.shouldExpandPartialReductionIntrinsic(cast<IntrinsicInst>(&I))) {
8088       visitTargetIntrinsic(I, Intrinsic);
8089       return;
8090     }
8091 
8092     setValue(&I, DAG.getPartialReduceAdd(sdl, EVT::getEVT(I.getType()),
8093                                          getValue(I.getOperand(0)),
8094                                          getValue(I.getOperand(1))));
8095     return;
8096   }
8097   case Intrinsic::experimental_cttz_elts: {
8098     auto DL = getCurSDLoc();
8099     SDValue Op = getValue(I.getOperand(0));
8100     EVT OpVT = Op.getValueType();
8101 
8102     if (!TLI.shouldExpandCttzElements(OpVT)) {
8103       visitTargetIntrinsic(I, Intrinsic);
8104       return;
8105     }
8106 
8107     if (OpVT.getScalarType() != MVT::i1) {
8108       // Compare the input vector elements to zero to form the i1 mask counted below.
8109       SDValue AllZero = DAG.getConstant(0, DL, OpVT);
8110       OpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
8111                               OpVT.getVectorElementCount());
8112       Op = DAG.getSetCC(DL, OpVT, Op, AllZero, ISD::SETNE);
8113     }
8114 
8115     // If the zero-is-poison flag is set, we can assume the upper limit
8116     // of the result is VF-1.
8117     bool ZeroIsPoison =
8118         !cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero();
8119     ConstantRange VScaleRange(1, true); // Dummy value.
8120     if (isa<ScalableVectorType>(I.getOperand(0)->getType()))
8121       VScaleRange = getVScaleRange(I.getCaller(), 64);
8122     unsigned EltWidth = TLI.getBitWidthForCttzElements(
8123         I.getType(), OpVT.getVectorElementCount(), ZeroIsPoison, &VScaleRange);
8124 
8125     MVT NewEltTy = MVT::getIntegerVT(EltWidth);
8126 
8127     // Create the new vector type & get the vector length
8128     EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltTy,
8129                                  OpVT.getVectorElementCount());
8130 
8131     SDValue VL =
8132         DAG.getElementCount(DL, NewEltTy, OpVT.getVectorElementCount());
8133 
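    // The remaining nodes compute, in outline:
    //   %stepvl = splat(VL) - <0, 1, 2, ...>   ; lane i holds VL - i
    //   %and    = %stepvl & sext(%mask)        ; nonzero only where mask is set
    //   %res    = VL - reduce.umax(%and)       ; index of the first set element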
8134     SDValue StepVec = DAG.getStepVector(DL, NewVT);
8135     SDValue SplatVL = DAG.getSplat(NewVT, DL, VL);
8136     SDValue StepVL = DAG.getNode(ISD::SUB, DL, NewVT, SplatVL, StepVec);
8137     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, Op);
8138     SDValue And = DAG.getNode(ISD::AND, DL, NewVT, StepVL, Ext);
8139     SDValue Max = DAG.getNode(ISD::VECREDUCE_UMAX, DL, NewEltTy, And);
8140     SDValue Sub = DAG.getNode(ISD::SUB, DL, NewEltTy, VL, Max);
8141 
8142     EVT RetTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
8143     SDValue Ret = DAG.getZExtOrTrunc(Sub, DL, RetTy);
8144 
8145     setValue(&I, Ret);
8146     return;
8147   }
8148   case Intrinsic::vector_insert: {
8149     SDValue Vec = getValue(I.getOperand(0));
8150     SDValue SubVec = getValue(I.getOperand(1));
8151     SDValue Index = getValue(I.getOperand(2));
8152 
8153     // The intrinsic's index type is i64, but the SDNode requires an index type
8154     // suitable for the target. Convert the index as required.
8155     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
8156     if (Index.getValueType() != VectorIdxTy)
8157       Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8158 
8159     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8160     setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
8161                              Index));
8162     return;
8163   }
8164   case Intrinsic::vector_extract: {
8165     SDValue Vec = getValue(I.getOperand(0));
8166     SDValue Index = getValue(I.getOperand(1));
8167     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8168 
8169     // The intrinsic's index type is i64, but the SDNode requires an index type
8170     // suitable for the target. Convert the index as required.
8171     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
8172     if (Index.getValueType() != VectorIdxTy)
8173       Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8174 
8175     setValue(&I,
8176              DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
8177     return;
8178   }
8179   case Intrinsic::vector_reverse:
8180     visitVectorReverse(I);
8181     return;
8182   case Intrinsic::vector_splice:
8183     visitVectorSplice(I);
8184     return;
8185   case Intrinsic::callbr_landingpad:
8186     visitCallBrLandingPad(I);
8187     return;
8188   case Intrinsic::vector_interleave2:
8189     visitVectorInterleave(I);
8190     return;
8191   case Intrinsic::vector_deinterleave2:
8192     visitVectorDeinterleave(I);
8193     return;
8194   case Intrinsic::experimental_vector_compress:
8195     setValue(&I, DAG.getNode(ISD::VECTOR_COMPRESS, sdl,
8196                              getValue(I.getArgOperand(0)).getValueType(),
8197                              getValue(I.getArgOperand(0)),
8198                              getValue(I.getArgOperand(1)),
8199                              getValue(I.getArgOperand(2)), Flags));
8200     return;
8201   case Intrinsic::experimental_convergence_anchor:
8202   case Intrinsic::experimental_convergence_entry:
8203   case Intrinsic::experimental_convergence_loop:
8204     visitConvergenceControl(I, Intrinsic);
8205     return;
8206   case Intrinsic::experimental_vector_histogram_add: {
8207     visitVectorHistogram(I, Intrinsic);
8208     return;
8209   }
8210   }
8211 }
8212 
8213 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8214     const ConstrainedFPIntrinsic &FPI) {
8215   SDLoc sdl = getCurSDLoc();
8216 
8217   // We do not need to serialize constrained FP intrinsics against
8218   // each other or against (nonvolatile) loads, so they can be
8219   // chained like loads.
8220   SDValue Chain = DAG.getRoot();
8221   SmallVector<SDValue, 4> Opers;
8222   Opers.push_back(Chain);
8223   for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
8224     Opers.push_back(getValue(FPI.getArgOperand(I)));
8225 
8226   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
8227     assert(Result.getNode()->getNumValues() == 2);
8228 
8229     // Push node to the appropriate list so that future instructions can be
8230     // chained up correctly.
8231     SDValue OutChain = Result.getValue(1);
8232     switch (EB) {
8233     case fp::ExceptionBehavior::ebIgnore:
8234       // The only reason why ebIgnore nodes still need to be chained is that
8235       // they might depend on the current rounding mode, and therefore must
8236       // not be moved across instructions that may change that mode.
8237       [[fallthrough]];
8238     case fp::ExceptionBehavior::ebMayTrap:
8239       // These must not be moved across calls or instructions that may change
8240       // floating-point exception masks.
8241       PendingConstrainedFP.push_back(OutChain);
8242       break;
8243     case fp::ExceptionBehavior::ebStrict:
8244       // These must not be moved across calls or instructions that may change
8245       // floating-point exception masks or read floating-point exception flags.
8246       // In addition, they cannot be optimized out even if unused.
8247       PendingConstrainedFPStrict.push_back(OutChain);
8248       break;
8249     }
8250   };
8251 
8252   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8253   EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
8254   SDVTList VTs = DAG.getVTList(VT, MVT::Other);
8255   fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
8256 
8257   SDNodeFlags Flags;
8258   if (EB == fp::ExceptionBehavior::ebIgnore)
8259     Flags.setNoFPExcept(true);
8260 
8261   if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
8262     Flags.copyFMF(*FPOp);
8263 
8264   unsigned Opcode;
8265   switch (FPI.getIntrinsicID()) {
8266   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
8267 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
8268   case Intrinsic::INTRINSIC:                                                   \
8269     Opcode = ISD::STRICT_##DAGN;                                               \
8270     break;
8271 #include "llvm/IR/ConstrainedOps.def"
8272   case Intrinsic::experimental_constrained_fmuladd: {
8273     Opcode = ISD::STRICT_FMA;
8274     // Break fmuladd into fmul and fadd.
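    // That is (a sketch): fma(a, b, c) becomes fadd(fmul(a, b), c), with the
    // strict chain threaded through both nodes via pushOutChain below.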
8275     if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
8276         !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
8277       Opers.pop_back();
8278       SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
8279       pushOutChain(Mul, EB);
8280       Opcode = ISD::STRICT_FADD;
8281       Opers.clear();
8282       Opers.push_back(Mul.getValue(1));
8283       Opers.push_back(Mul.getValue(0));
8284       Opers.push_back(getValue(FPI.getArgOperand(2)));
8285     }
8286     break;
8287   }
8288   }
8289 
8290   // A few strict DAG nodes carry additional operands that are not
8291   // set up by the default code above.
8292   switch (Opcode) {
8293   default: break;
8294   case ISD::STRICT_FP_ROUND:
8295     Opers.push_back(
8296         DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
8297     break;
8298   case ISD::STRICT_FSETCC:
8299   case ISD::STRICT_FSETCCS: {
8300     auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
8301     ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
8302     if (TM.Options.NoNaNsFPMath)
8303       Condition = getFCmpCodeWithoutNaN(Condition);
8304     Opers.push_back(DAG.getCondCode(Condition));
8305     break;
8306   }
8307   }
8308 
8309   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
8310   pushOutChain(Result, EB);
8311 
8312   SDValue FPResult = Result.getValue(0);
8313   setValue(&FPI, FPResult);
8314 }
8315 
8316 static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
8317   std::optional<unsigned> ResOPC;
8318   switch (VPIntrin.getIntrinsicID()) {
8319   case Intrinsic::vp_ctlz: {
8320     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8321     ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8322     break;
8323   }
8324   case Intrinsic::vp_cttz: {
8325     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8326     ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8327     break;
8328   }
8329   case Intrinsic::vp_cttz_elts: {
8330     bool IsZeroPoison = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8331     ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8332     break;
8333   }
8334 #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)                                    \
8335   case Intrinsic::VPID:                                                        \
8336     ResOPC = ISD::VPSD;                                                        \
8337     break;
8338 #include "llvm/IR/VPIntrinsics.def"
8339   }
8340 
8341   if (!ResOPC)
8342     llvm_unreachable(
8343         "Inconsistency: no SDNode available for this VPIntrinsic!");
8344 
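  // Sequential (ordered) FP reductions may be relaxed to their unordered
  // counterparts when reassociation is allowed; e.g. a vp.reduce.fadd carrying
  // the 'reassoc' fast-math flag maps to ISD::VP_REDUCE_FADD instead.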
8345   if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8346       *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8347     if (VPIntrin.getFastMathFlags().allowReassoc())
8348       return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8349                                                 : ISD::VP_REDUCE_FMUL;
8350   }
8351 
8352   return *ResOPC;
8353 }
8354 
8355 void SelectionDAGBuilder::visitVPLoad(
8356     const VPIntrinsic &VPIntrin, EVT VT,
8357     const SmallVectorImpl<SDValue> &OpValues) {
8358   SDLoc DL = getCurSDLoc();
8359   Value *PtrOperand = VPIntrin.getArgOperand(0);
8360   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8361   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8362   const MDNode *Ranges = getRangeMetadata(VPIntrin);
8363   SDValue LD;
8364   if (!Alignment)
8365     Alignment = DAG.getEVTAlign(VT);
8366   // Do not serialize variable-length loads of constant memory with
8367   // anything.
8368   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8369   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
8370   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8371   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8372       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
8373       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8374   LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8375                      MMO, false /*IsExpanding */);
8376   if (AddToChain)
8377     PendingLoads.push_back(LD.getValue(1));
8378   setValue(&VPIntrin, LD);
8379 }
8380 
8381 void SelectionDAGBuilder::visitVPGather(
8382     const VPIntrinsic &VPIntrin, EVT VT,
8383     const SmallVectorImpl<SDValue> &OpValues) {
8384   SDLoc DL = getCurSDLoc();
8385   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8386   Value *PtrOperand = VPIntrin.getArgOperand(0);
8387   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8388   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8389   const MDNode *Ranges = getRangeMetadata(VPIntrin);
8390   SDValue LD;
8391   if (!Alignment)
8392     Alignment = DAG.getEVTAlign(VT.getScalarType());
8393   unsigned AS =
8394     PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8395   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8396       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
8397       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8398   SDValue Base, Index, Scale;
8399   ISD::MemIndexType IndexType;
8400   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
8401                                     this, VPIntrin.getParent(),
8402                                     VT.getScalarStoreSize());
8403   if (!UniformBase) {
8404     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
8405     Index = getValue(PtrOperand);
8406     IndexType = ISD::SIGNED_SCALED;
8407     Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
8408   }
8409   EVT IdxVT = Index.getValueType();
8410   EVT EltTy = IdxVT.getVectorElementType();
8411   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
8412     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
8413     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
8414   }
8415   LD = DAG.getGatherVP(
8416       DAG.getVTList(VT, MVT::Other), VT, DL,
8417       {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8418       IndexType);
8419   PendingLoads.push_back(LD.getValue(1));
8420   setValue(&VPIntrin, LD);
8421 }
8422 
8423 void SelectionDAGBuilder::visitVPStore(
8424     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8425   SDLoc DL = getCurSDLoc();
8426   Value *PtrOperand = VPIntrin.getArgOperand(1);
8427   EVT VT = OpValues[0].getValueType();
8428   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8429   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8430   SDValue ST;
8431   if (!Alignment)
8432     Alignment = DAG.getEVTAlign(VT);
8433   SDValue Ptr = OpValues[1];
8434   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
8435   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8436       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
8437       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8438   ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
8439                       OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
8440                       /* IsTruncating */ false, /*IsCompressing*/ false);
8441   DAG.setRoot(ST);
8442   setValue(&VPIntrin, ST);
8443 }
8444 
8445 void SelectionDAGBuilder::visitVPScatter(
8446     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8447   SDLoc DL = getCurSDLoc();
8448   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8449   Value *PtrOperand = VPIntrin.getArgOperand(1);
8450   EVT VT = OpValues[0].getValueType();
8451   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8452   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8453   SDValue ST;
8454   if (!Alignment)
8455     Alignment = DAG.getEVTAlign(VT.getScalarType());
8456   unsigned AS =
8457       PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8458   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8459       MachinePointerInfo(AS), MachineMemOperand::MOStore,
8460       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8461   SDValue Base, Index, Scale;
8462   ISD::MemIndexType IndexType;
8463   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
8464                                     this, VPIntrin.getParent(),
8465                                     VT.getScalarStoreSize());
8466   if (!UniformBase) {
8467     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
8468     Index = getValue(PtrOperand);
8469     IndexType = ISD::SIGNED_SCALED;
8470     Scale =
8471       DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
8472   }
8473   EVT IdxVT = Index.getValueType();
8474   EVT EltTy = IdxVT.getVectorElementType();
8475   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
8476     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
8477     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
8478   }
8479   ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
8480                         {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8481                          OpValues[2], OpValues[3]},
8482                         MMO, IndexType);
8483   DAG.setRoot(ST);
8484   setValue(&VPIntrin, ST);
8485 }
8486 
8487 void SelectionDAGBuilder::visitVPStridedLoad(
8488     const VPIntrinsic &VPIntrin, EVT VT,
8489     const SmallVectorImpl<SDValue> &OpValues) {
8490   SDLoc DL = getCurSDLoc();
8491   Value *PtrOperand = VPIntrin.getArgOperand(0);
8492   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8493   if (!Alignment)
8494     Alignment = DAG.getEVTAlign(VT.getScalarType());
8495   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8496   const MDNode *Ranges = getRangeMetadata(VPIntrin);
8497   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8498   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
8499   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8500   unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8501   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8502       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
8503       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8504 
8505   SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
8506                                     OpValues[2], OpValues[3], MMO,
8507                                     false /*IsExpanding*/);
8508 
8509   if (AddToChain)
8510     PendingLoads.push_back(LD.getValue(1));
8511   setValue(&VPIntrin, LD);
8512 }
8513 
8514 void SelectionDAGBuilder::visitVPStridedStore(
8515     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8516   SDLoc DL = getCurSDLoc();
8517   Value *PtrOperand = VPIntrin.getArgOperand(1);
8518   EVT VT = OpValues[0].getValueType();
8519   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8520   if (!Alignment)
8521     Alignment = DAG.getEVTAlign(VT.getScalarType());
8522   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8523   unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8524   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8525       MachinePointerInfo(AS), MachineMemOperand::MOStore,
8526       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8527 
8528   SDValue ST = DAG.getStridedStoreVP(
8529       getMemoryRoot(), DL, OpValues[0], OpValues[1],
8530       DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8531       OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false,
8532       /*IsCompressing*/ false);
8533 
8534   DAG.setRoot(ST);
8535   setValue(&VPIntrin, ST);
8536 }
8537 
8538 void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
8539   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8540   SDLoc DL = getCurSDLoc();
8541 
8542   ISD::CondCode Condition;
8543   CmpInst::Predicate CondCode = VPIntrin.getPredicate();
8544   bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
8545   if (IsFP) {
8546     // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan)
8547     // flags, but calls that don't return floating-point types can't be
8548     // FPMathOperators, like vp.fcmp. This affects constrained fcmp too.
8549     Condition = getFCmpCondCode(CondCode);
8550     if (TM.Options.NoNaNsFPMath)
8551       Condition = getFCmpCodeWithoutNaN(Condition);
8552   } else {
8553     Condition = getICmpCondCode(CondCode);
8554   }
8555 
8556   SDValue Op1 = getValue(VPIntrin.getOperand(0));
8557   SDValue Op2 = getValue(VPIntrin.getOperand(1));
8558   // Operand #2 is the condition code.
8559   SDValue MaskOp = getValue(VPIntrin.getOperand(3));
8560   SDValue EVL = getValue(VPIntrin.getOperand(4));
8561   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8562   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8563          "Unexpected target EVL type");
8564   EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
8565 
8566   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8567                                                         VPIntrin.getType());
8568   setValue(&VPIntrin,
8569            DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8570 }
8571 
8572 void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8573     const VPIntrinsic &VPIntrin) {
8574   SDLoc DL = getCurSDLoc();
8575   unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
8576 
8577   auto IID = VPIntrin.getIntrinsicID();
8578 
8579   if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8580     return visitVPCmp(*CmpI);
8581 
8582   SmallVector<EVT, 4> ValueVTs;
8583   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8584   ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
8585   SDVTList VTs = DAG.getVTList(ValueVTs);
8586 
8587   auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);
8588 
8589   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8590   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8591          "Unexpected target EVL type");
8592 
8593   // Request operands.
8594   SmallVector<SDValue, 7> OpValues;
8595   for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) {
8596     auto Op = getValue(VPIntrin.getArgOperand(I));
8597     if (I == EVLParamPos)
8598       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
8599     OpValues.push_back(Op);
8600   }
8601 
8602   switch (Opcode) {
8603   default: {
8604     SDNodeFlags SDFlags;
8605     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8606       SDFlags.copyFMF(*FPMO);
8607     SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags);
8608     setValue(&VPIntrin, Result);
8609     break;
8610   }
8611   case ISD::VP_LOAD:
8612     visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8613     break;
8614   case ISD::VP_GATHER:
8615     visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8616     break;
8617   case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8618     visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8619     break;
8620   case ISD::VP_STORE:
8621     visitVPStore(VPIntrin, OpValues);
8622     break;
8623   case ISD::VP_SCATTER:
8624     visitVPScatter(VPIntrin, OpValues);
8625     break;
8626   case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8627     visitVPStridedStore(VPIntrin, OpValues);
8628     break;
8629   case ISD::VP_FMULADD: {
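    // This mirrors the constrained fmuladd handling earlier in the file
    // (a sketch):
    //   vp.fmuladd(a, b, c, m, evl) -> vp.fma(a, b, c, m, evl)  if FMA is fast
    //                               -> vp.fadd(vp.fmul(a, b), c) otherwise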
8630     assert(OpValues.size() == 5 && "Unexpected number of operands");
8631     SDNodeFlags SDFlags;
8632     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8633       SDFlags.copyFMF(*FPMO);
8634     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
8635         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) {
8636       setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags));
8637     } else {
8638       SDValue Mul = DAG.getNode(
8639           ISD::VP_FMUL, DL, VTs,
8640           {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8641       SDValue Add =
8642           DAG.getNode(ISD::VP_FADD, DL, VTs,
8643                       {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8644       setValue(&VPIntrin, Add);
8645     }
8646     break;
8647   }
8648   case ISD::VP_IS_FPCLASS: {
8649     const DataLayout DLayout = DAG.getDataLayout();
8650     EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
8651     auto Constant = OpValues[1]->getAsZExtVal();
8652     SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
8653     SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
8654                             {OpValues[0], Check, OpValues[2], OpValues[3]});
8655     setValue(&VPIntrin, V);
8656     return;
8657   }
8658   case ISD::VP_INTTOPTR: {
8659     SDValue N = OpValues[0];
8660     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
8661     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
8662     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8663                                OpValues[2]);
8664     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8665                              OpValues[2]);
8666     setValue(&VPIntrin, N);
8667     break;
8668   }
8669   case ISD::VP_PTRTOINT: {
8670     SDValue N = OpValues[0];
8671     EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8672                                                           VPIntrin.getType());
8673     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
8674                                        VPIntrin.getOperand(0)->getType());
8675     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8676                                OpValues[2]);
8677     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8678                              OpValues[2]);
8679     setValue(&VPIntrin, N);
8680     break;
8681   }
8682   case ISD::VP_ABS:
8683   case ISD::VP_CTLZ:
8684   case ISD::VP_CTLZ_ZERO_UNDEF:
8685   case ISD::VP_CTTZ:
8686   case ISD::VP_CTTZ_ZERO_UNDEF:
8687   case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8688   case ISD::VP_CTTZ_ELTS: {
8689     SDValue Result =
8690         DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8691     setValue(&VPIntrin, Result);
8692     break;
8693   }
8694   }
8695 }
8696 
8697 SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
8698                                           const BasicBlock *EHPadBB,
8699                                           MCSymbol *&BeginLabel) {
8700   MachineFunction &MF = DAG.getMachineFunction();
8701 
8702   // Insert a label before the invoke call to mark the try range.  This can be
8703   // used to detect deletion of the invoke via the MachineModuleInfo.
8704   BeginLabel = MF.getContext().createTempSymbol();
8705 
8706   // For SjLj, keep track of which landing pads go with which invokes
8707   // so as to maintain the ordering of pads in the LSDA.
8708   unsigned CallSiteIndex = FuncInfo.getCurrentCallSite();
8709   if (CallSiteIndex) {
8710     MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
8711     LPadToCallSiteMap[FuncInfo.getMBB(EHPadBB)].push_back(CallSiteIndex);
8712 
8713     // Now that the call site is handled, stop tracking it.
8714     FuncInfo.setCurrentCallSite(0);
8715   }
8716 
8717   return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
8718 }
8719 
8720 SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
8721                                         const BasicBlock *EHPadBB,
8722                                         MCSymbol *BeginLabel) {
8723   assert(BeginLabel && "BeginLabel should've been set");
8724 
8725   MachineFunction &MF = DAG.getMachineFunction();
8726 
8727   // Insert a label at the end of the invoke call to mark the try range.  This
8728   // can be used to detect deletion of the invoke via the MachineModuleInfo.
8729   MCSymbol *EndLabel = MF.getContext().createTempSymbol();
8730   Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
8731 
8732   // Inform MachineModuleInfo of range.
8733   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
8734   // Some platforms (e.g. wasm) use funclet-style IR but do not actually use
8735   // outlined funclets or their LSDA info style.
8736   if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
8737     assert(II && "II should've been set");
8738     WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
8739     EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
8740   } else if (!isScopedEHPersonality(Pers)) {
8741     assert(EHPadBB);
8742     MF.addInvoke(FuncInfo.getMBB(EHPadBB), BeginLabel, EndLabel);
8743   }
8744 
8745   return Chain;
8746 }
8747 
8748 std::pair<SDValue, SDValue>
8749 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
8750                                     const BasicBlock *EHPadBB) {
8751   MCSymbol *BeginLabel = nullptr;
8752 
8753   if (EHPadBB) {
8754     // Both PendingLoads and PendingExports must be flushed here;
8755     // this call might not return.
8756     (void)getRoot();
8757     DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
8758     CLI.setChain(getRoot());
8759   }
8760 
8761   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8762   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
8763 
8764   assert((CLI.IsTailCall || Result.second.getNode()) &&
8765          "Non-null chain expected with non-tail call!");
8766   assert((Result.second.getNode() || !Result.first.getNode()) &&
8767          "Null value expected with tail call!");
8768 
8769   if (!Result.second.getNode()) {
8770     // As a special case, a null chain means that a tail call has been emitted
8771     // and the DAG root is already updated.
8772     HasTailCall = true;
8773 
8774     // Since there's no actual continuation from this block, nothing can
8775     // rely on us setting vregs for the call's values.
8776     PendingExports.clear();
8777   } else {
8778     DAG.setRoot(Result.second);
8779   }
8780 
8781   if (EHPadBB) {
8782     DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
8783                            BeginLabel));
8784     Result.second = getRoot();
8785   }
8786 
8787   return Result;
8788 }
8789 
8790 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
8791                                       bool isTailCall, bool isMustTailCall,
8792                                       const BasicBlock *EHPadBB,
8793                                       const TargetLowering::PtrAuthInfo *PAI) {
8794   auto &DL = DAG.getDataLayout();
8795   FunctionType *FTy = CB.getFunctionType();
8796   Type *RetTy = CB.getType();
8797 
8798   TargetLowering::ArgListTy Args;
8799   Args.reserve(CB.arg_size());
8800 
8801   const Value *SwiftErrorVal = nullptr;
8802   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8803 
8804   if (isTailCall) {
8805     // Avoid emitting tail calls in functions with the disable-tail-calls
8806     // attribute.
8807     auto *Caller = CB.getParent()->getParent();
8808     if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
8809         "true" && !isMustTailCall)
8810       isTailCall = false;
8811 
8812     // We can't tail call inside a function with a swifterror argument. Lowering
8813     // does not support this yet. It would have to move into the swifterror
8814     // register before the call.
8815     if (TLI.supportSwiftError() &&
8816         Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8817       isTailCall = false;
8818   }
8819 
8820   for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
8821     TargetLowering::ArgListEntry Entry;
8822     const Value *V = *I;
8823 
8824     // Skip empty types
8825     if (V->getType()->isEmptyTy())
8826       continue;
8827 
8828     SDValue ArgNode = getValue(V);
8829     Entry.Node = ArgNode; Entry.Ty = V->getType();
8830 
8831     Entry.setAttributes(&CB, I - CB.arg_begin());
8832 
8833     // Use swifterror virtual register as input to the call.
8834     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
8835       SwiftErrorVal = V;
8836       // Find the virtual register for the actual swifterror argument and use
8837       // it in place of the Value.
8838       Entry.Node =
8839           DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
8840                           EVT(TLI.getPointerTy(DL)));
8841     }
8842 
8843     Args.push_back(Entry);
8844 
8845     // If we have an explicit sret argument that is an Instruction (i.e., it
8846     // might point to function-local memory), we can't meaningfully tail-call.
8847     if (Entry.IsSRet && isa<Instruction>(V))
8848       isTailCall = false;
8849   }
8850 
8851   // If the call site has a cfguardtarget operand bundle, create and add an
8852   // additional ArgListEntry.
8853   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
8854     TargetLowering::ArgListEntry Entry;
8855     Value *V = Bundle->Inputs[0];
8856     SDValue ArgNode = getValue(V);
8857     Entry.Node = ArgNode;
8858     Entry.Ty = V->getType();
8859     Entry.IsCFGuardTarget = true;
8860     Args.push_back(Entry);
8861   }
8862 
8863   // Check if target-independent constraints permit a tail call here.
8864   // Target-dependent constraints are checked within TLI.LowerCallTo.
8865   if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
8866     isTailCall = false;
8867 
8868   // Disable tail calls if there is a swifterror argument. Targets have not
8869   // been updated to support tail calls with swifterror yet.
8870   if (TLI.supportSwiftError() && SwiftErrorVal)
8871     isTailCall = false;
8872 
8873   ConstantInt *CFIType = nullptr;
8874   if (CB.isIndirectCall()) {
8875     if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) {
8876       if (!TLI.supportKCFIBundles())
8877         report_fatal_error(
8878             "Target doesn't support calls with kcfi operand bundles.");
8879       CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8880       assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
8881     }
8882   }
8883 
8884   SDValue ConvControlToken;
8885   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
8886     auto *Token = Bundle->Inputs[0].get();
8887     ConvControlToken = getValue(Token);
8888   }
8889 
8890   TargetLowering::CallLoweringInfo CLI(DAG);
8891   CLI.setDebugLoc(getCurSDLoc())
8892       .setChain(getRoot())
8893       .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
8894       .setTailCall(isTailCall)
8895       .setConvergent(CB.isConvergent())
8896       .setIsPreallocated(
8897           CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
8898       .setCFIType(CFIType)
8899       .setConvergenceControlToken(ConvControlToken);
8900 
8901   // Set the pointer authentication info if we have it.
8902   if (PAI) {
8903     if (!TLI.supportPtrAuthBundles())
8904       report_fatal_error(
8905           "This target doesn't support calls with ptrauth operand bundles.");
8906     CLI.setPtrAuth(*PAI);
8907   }
8908 
8909   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8910 
8911   if (Result.first.getNode()) {
8912     Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
8913     setValue(&CB, Result.first);
8914   }
8915 
8916   // The last element of CLI.InVals has the SDValue for swifterror return.
8917   // Here we copy it to a virtual register and update SwiftErrorMap for
8918   // book-keeping.
8919   if (SwiftErrorVal && TLI.supportSwiftError()) {
8920     // Get the last element of InVals.
8921     SDValue Src = CLI.InVals.back();
8922     Register VReg =
8923         SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
8924     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
8925     DAG.setRoot(CopyNode);
8926   }
8927 }
8928 
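     /// Produce a load of \p LoadVT from \p PtrVal for use in memcmp lowering,
     /// constant-folding the load when the pointee is constant memory (e.g. a
     /// string literal).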
8929 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
8930                              SelectionDAGBuilder &Builder) {
8931   // Check to see if this load can be trivially constant folded, e.g. if the
8932   // input is from a string literal.
8933   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
8934     // Cast pointer to the type we really want to load.
8935     Type *LoadTy =
8936         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
8937     if (LoadVT.isVector())
8938       LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
8939 
8940     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
8941                                          PointerType::getUnqual(LoadTy));
8942 
8943     if (const Constant *LoadCst =
8944             ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
8945                                          LoadTy, Builder.DAG.getDataLayout()))
8946       return Builder.getValue(LoadCst);
8947   }
8948 
8949   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
8950   // still constant memory, the input chain can be the entry node.
8951   SDValue Root;
8952   bool ConstantMemory = false;
8953 
8954   // Do not serialize (non-volatile) loads of constant memory with anything.
8955   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
8956     Root = Builder.DAG.getEntryNode();
8957     ConstantMemory = true;
8958   } else {
8959     // Do not serialize non-volatile loads against each other.
8960     Root = Builder.DAG.getRoot();
8961   }
8962 
8963   SDValue Ptr = Builder.getValue(PtrVal);
8964   SDValue LoadVal =
8965       Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
8966                           MachinePointerInfo(PtrVal), Align(1));
8967 
8968   if (!ConstantMemory)
8969     Builder.PendingLoads.push_back(LoadVal.getValue(1));
8970   return LoadVal;
8971 }
8972 
8973 /// Record the value for an instruction that produces an integer result,
8974 /// converting the type where necessary.
8975 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
8976                                                   SDValue Value,
8977                                                   bool IsSigned) {
8978   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8979                                                     I.getType(), true);
8980   Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT);
8981   setValue(&I, Value);
8982 }
8983 
8984 /// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
8985 /// true and lower it. Otherwise return false, and it will be lowered like a
8986 /// normal call.
8987 /// The caller already checked that \p I calls the appropriate LibFunc with a
8988 /// correct prototype.
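     /// For example, a memcmp with a constant size of 4 whose result is only
     /// compared against zero can be lowered to two i32 loads and a single SETNE
     /// instead of a libcall.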
8989 bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
8990   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
8991   const Value *Size = I.getArgOperand(2);
8992   const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size));
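       // A zero-length memcmp/bcmp compares no bytes; fold directly to 0.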
8993   if (CSize && CSize->getZExtValue() == 0) {
8994     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8995                                                           I.getType(), true);
8996     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
8997     return true;
8998   }
8999 
9000   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9001   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
9002       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
9003       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
9004   if (Res.first.getNode()) {
9005     processIntegerCallValue(I, Res.first, true);
9006     PendingLoads.push_back(Res.second);
9007     return true;
9008   }
9009 
9010   // memcmp(S1,S2,2) != 0 -> (*(short*)S1 != *(short*)S2) != 0
9011   // memcmp(S1,S2,4) != 0 -> (*(int*)S1 != *(int*)S2) != 0
9012   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
9013     return false;
9014 
9015   // If the target has a fast compare for the given size, it will return a
9016   // preferred load type for that size. Require that the load VT is legal and
9017   // that the target supports unaligned loads of that type. Otherwise, return
9018   // INVALID.
9019   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
9020     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9021     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
9022     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
9023       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
9024       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
9025       // TODO: Check alignment of src and dest ptrs.
9026       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
9027       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
9028       if (!TLI.isTypeLegal(LVT) ||
9029           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
9030           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
9031         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
9032     }
9033 
9034     return LVT;
9035   };
9036 
9037   // This turns into unaligned loads. We only do this if the target natively
9038   // supports the MVT we'll be loading or if it is small enough (<= 4) that
9039   // we'll only produce a small number of byte loads.
9040   MVT LoadVT;
9041   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
9042   switch (NumBitsToCompare) {
9043   default:
9044     return false;
9045   case 16:
9046     LoadVT = MVT::i16;
9047     break;
9048   case 32:
9049     LoadVT = MVT::i32;
9050     break;
9051   case 64:
9052   case 128:
9053   case 256:
9054     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9055     break;
9056   }
9057 
9058   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
9059     return false;
9060 
9061   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
9062   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
9063 
9064   // Bitcast to a wide integer type if the loads are vectors.
9065   if (LoadVT.isVector()) {
9066     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
9067     LoadL = DAG.getBitcast(CmpVT, LoadL);
9068     LoadR = DAG.getBitcast(CmpVT, LoadR);
9069   }
9070 
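       // Only equality matters here (guaranteed by the zero-equality-use check
       // above), so a single SETNE on the loaded values yields the result.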
9071   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
9072   processIntegerCallValue(I, Cmp, false);
9073   return true;
9074 }
9075 
9076 /// See if we can lower a memchr call into an optimized form. If so, return
9077 /// true and lower it. Otherwise return false, and it will be lowered like a
9078 /// normal call.
9079 /// The caller already checked that \p I calls the appropriate LibFunc with a
9080 /// correct prototype.
9081 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
9082   const Value *Src = I.getArgOperand(0);
9083   const Value *Char = I.getArgOperand(1);
9084   const Value *Length = I.getArgOperand(2);
9085 
9086   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9087   std::pair<SDValue, SDValue> Res =
9088     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
9089                                 getValue(Src), getValue(Char), getValue(Length),
9090                                 MachinePointerInfo(Src));
9091   if (Res.first.getNode()) {
9092     setValue(&I, Res.first);
9093     PendingLoads.push_back(Res.second);
9094     return true;
9095   }
9096 
9097   return false;
9098 }
9099 
9100 /// See if we can lower a mempcpy call into an optimized form. If so, return
9101 /// true and lower it. Otherwise return false, and it will be lowered like a
9102 /// normal call.
9103 /// The caller already checked that \p I calls the appropriate LibFunc with a
9104 /// correct prototype.
9105 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
9106   SDValue Dst = getValue(I.getArgOperand(0));
9107   SDValue Src = getValue(I.getArgOperand(1));
9108   SDValue Size = getValue(I.getArgOperand(2));
9109 
9110   Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
9111   Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
9112   // SelectionDAG::getMemcpy requires a defined alignment; use the minimum.
9113   Align Alignment = std::min(DstAlign, SrcAlign);
9114 
9115   SDLoc sdl = getCurSDLoc();
9116 
9117   // In the mempcpy context we need to pass in a false value for isTailCall
9118   // because the return pointer needs to be adjusted by the size of
9119   // the copied memory.
9120   SDValue Root = getMemoryRoot();
9121   SDValue MC = DAG.getMemcpy(
9122       Root, sdl, Dst, Src, Size, Alignment, false, false, /*CI=*/nullptr,
9123       std::nullopt, MachinePointerInfo(I.getArgOperand(0)),
9124       MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata());
9125   assert(MC.getNode() != nullptr &&
9126          "** memcpy should not be lowered as TailCall in mempcpy context **");
9127   DAG.setRoot(MC);
9128 
9129   // Check if Size needs to be truncated or extended.
9130   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
9131 
9132   // Adjust return pointer to point just past the last dst byte.
9133   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
9134                                     Dst, Size);
9135   setValue(&I, DstPlusSize);
9136   return true;
9137 }
9138 
9139 /// See if we can lower a strcpy call into an optimized form.  If so, return
9140 /// true and lower it, otherwise return false and it will be lowered like a
9141 /// normal call.
9142 /// The caller already checked that \p I calls the appropriate LibFunc with a
9143 /// correct prototype.
9144 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
9145   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9146 
9147   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9148   std::pair<SDValue, SDValue> Res =
9149     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
9150                                 getValue(Arg0), getValue(Arg1),
9151                                 MachinePointerInfo(Arg0),
9152                                 MachinePointerInfo(Arg1), isStpcpy);
9153   if (Res.first.getNode()) {
9154     setValue(&I, Res.first);
9155     DAG.setRoot(Res.second);
9156     return true;
9157   }
9158 
9159   return false;
9160 }
9161 
9162 /// See if we can lower a strcmp call into an optimized form.  If so, return
9163 /// true and lower it, otherwise return false and it will be lowered like a
9164 /// normal call.
9165 /// The caller already checked that \p I calls the appropriate LibFunc with a
9166 /// correct prototype.
9167 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
9168   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9169 
9170   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9171   std::pair<SDValue, SDValue> Res =
9172     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
9173                                 getValue(Arg0), getValue(Arg1),
9174                                 MachinePointerInfo(Arg0),
9175                                 MachinePointerInfo(Arg1));
9176   if (Res.first.getNode()) {
9177     processIntegerCallValue(I, Res.first, true);
9178     PendingLoads.push_back(Res.second);
9179     return true;
9180   }
9181 
9182   return false;
9183 }
9184 
9185 /// See if we can lower a strlen call into an optimized form.  If so, return
9186 /// true and lower it, otherwise return false and it will be lowered like a
9187 /// normal call.
9188 /// The caller already checked that \p I calls the appropriate LibFunc with a
9189 /// correct prototype.
9190 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
9191   const Value *Arg0 = I.getArgOperand(0);
9192 
9193   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9194   std::pair<SDValue, SDValue> Res =
9195     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
9196                                 getValue(Arg0), MachinePointerInfo(Arg0));
9197   if (Res.first.getNode()) {
9198     processIntegerCallValue(I, Res.first, false);
9199     PendingLoads.push_back(Res.second);
9200     return true;
9201   }
9202 
9203   return false;
9204 }
9205 
9206 /// See if we can lower a strnlen call into an optimized form.  If so, return
9207 /// true and lower it, otherwise return false and it will be lowered like a
9208 /// normal call.
9209 /// The caller already checked that \p I calls the appropriate LibFunc with a
9210 /// correct prototype.
9211 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
9212   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9213 
9214   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9215   std::pair<SDValue, SDValue> Res =
9216     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
9217                                  getValue(Arg0), getValue(Arg1),
9218                                  MachinePointerInfo(Arg0));
9219   if (Res.first.getNode()) {
9220     processIntegerCallValue(I, Res.first, false);
9221     PendingLoads.push_back(Res.second);
9222     return true;
9223   }
9224 
9225   return false;
9226 }
9227 
9228 /// See if we can lower a unary floating-point operation into an SDNode with
9229 /// the specified Opcode.  If so, return true and lower it, otherwise return
9230 /// false and it will be lowered like a normal call.
9231 /// The caller already checked that \p I calls the appropriate LibFunc with a
9232 /// correct prototype.
9233 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
9234                                               unsigned Opcode) {
9235   // We already checked this call's prototype; verify it doesn't modify errno.
9236   if (!I.onlyReadsMemory())
9237     return false;
9238 
9239   SDNodeFlags Flags;
9240   Flags.copyFMF(cast<FPMathOperator>(I));
9241 
9242   SDValue Tmp = getValue(I.getArgOperand(0));
9243   setValue(&I,
9244            DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
9245   return true;
9246 }
9247 
9248 /// See if we can lower a binary floating-point operation into an SDNode with
9249 /// the specified Opcode. If so, return true and lower it. Otherwise return
9250 /// false, and it will be lowered like a normal call.
9251 /// The caller already checked that \p I calls the appropriate LibFunc with a
9252 /// correct prototype.
9253 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
9254                                                unsigned Opcode) {
9255   // We already checked this call's prototype; verify it doesn't modify errno.
9256   if (!I.onlyReadsMemory())
9257     return false;
9258 
9259   SDNodeFlags Flags;
9260   Flags.copyFMF(cast<FPMathOperator>(I));
9261 
9262   SDValue Tmp0 = getValue(I.getArgOperand(0));
9263   SDValue Tmp1 = getValue(I.getArgOperand(1));
9264   EVT VT = Tmp0.getValueType();
9265   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
9266   return true;
9267 }
9268 
9269 void SelectionDAGBuilder::visitCall(const CallInst &I) {
9270   // Handle inline assembly differently.
9271   if (I.isInlineAsm()) {
9272     visitInlineAsm(I);
9273     return;
9274   }
9275 
9276   diagnoseDontCall(I);
9277 
9278   if (Function *F = I.getCalledFunction()) {
9279     if (F->isDeclaration()) {
9280       // Is this an LLVM intrinsic or a target-specific intrinsic?
9281       unsigned IID = F->getIntrinsicID();
9282       if (!IID)
9283         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
9284           IID = II->getIntrinsicID(F);
9285 
9286       if (IID) {
9287         visitIntrinsicCall(I, IID);
9288         return;
9289       }
9290     }
9291 
9292     // Check for well-known libc/libm calls.  If the function is internal, it
9293     // can't be a library call.  Don't do the check if marked as nobuiltin for
9294     // some reason or the call site requires strict floating point semantics.
9295     LibFunc Func;
9296     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
9297         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
9298         LibInfo->hasOptimizedCodeGen(Func)) {
9299       switch (Func) {
9300       default: break;
9301       case LibFunc_bcmp:
9302         if (visitMemCmpBCmpCall(I))
9303           return;
9304         break;
9305       case LibFunc_copysign:
9306       case LibFunc_copysignf:
9307       case LibFunc_copysignl:
9308         // We already checked this call's prototype; verify it doesn't modify
9309         // errno.
9310         if (I.onlyReadsMemory()) {
9311           SDValue LHS = getValue(I.getArgOperand(0));
9312           SDValue RHS = getValue(I.getArgOperand(1));
9313           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
9314                                    LHS.getValueType(), LHS, RHS));
9315           return;
9316         }
9317         break;
9318       case LibFunc_fabs:
9319       case LibFunc_fabsf:
9320       case LibFunc_fabsl:
9321         if (visitUnaryFloatCall(I, ISD::FABS))
9322           return;
9323         break;
9324       case LibFunc_fmin:
9325       case LibFunc_fminf:
9326       case LibFunc_fminl:
9327         if (visitBinaryFloatCall(I, ISD::FMINNUM))
9328           return;
9329         break;
9330       case LibFunc_fmax:
9331       case LibFunc_fmaxf:
9332       case LibFunc_fmaxl:
9333         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
9334           return;
9335         break;
9336       case LibFunc_fminimum_num:
9337       case LibFunc_fminimum_numf:
9338       case LibFunc_fminimum_numl:
9339         if (visitBinaryFloatCall(I, ISD::FMINIMUMNUM))
9340           return;
9341         break;
9342       case LibFunc_fmaximum_num:
9343       case LibFunc_fmaximum_numf:
9344       case LibFunc_fmaximum_numl:
9345         if (visitBinaryFloatCall(I, ISD::FMAXIMUMNUM))
9346           return;
9347         break;
9348       case LibFunc_sin:
9349       case LibFunc_sinf:
9350       case LibFunc_sinl:
9351         if (visitUnaryFloatCall(I, ISD::FSIN))
9352           return;
9353         break;
9354       case LibFunc_cos:
9355       case LibFunc_cosf:
9356       case LibFunc_cosl:
9357         if (visitUnaryFloatCall(I, ISD::FCOS))
9358           return;
9359         break;
9360       case LibFunc_tan:
9361       case LibFunc_tanf:
9362       case LibFunc_tanl:
9363         if (visitUnaryFloatCall(I, ISD::FTAN))
9364           return;
9365         break;
9366       case LibFunc_asin:
9367       case LibFunc_asinf:
9368       case LibFunc_asinl:
9369         if (visitUnaryFloatCall(I, ISD::FASIN))
9370           return;
9371         break;
9372       case LibFunc_acos:
9373       case LibFunc_acosf:
9374       case LibFunc_acosl:
9375         if (visitUnaryFloatCall(I, ISD::FACOS))
9376           return;
9377         break;
9378       case LibFunc_atan:
9379       case LibFunc_atanf:
9380       case LibFunc_atanl:
9381         if (visitUnaryFloatCall(I, ISD::FATAN))
9382           return;
9383         break;
9384       case LibFunc_atan2:
9385       case LibFunc_atan2f:
9386       case LibFunc_atan2l:
9387         if (visitBinaryFloatCall(I, ISD::FATAN2))
9388           return;
9389         break;
9390       case LibFunc_sinh:
9391       case LibFunc_sinhf:
9392       case LibFunc_sinhl:
9393         if (visitUnaryFloatCall(I, ISD::FSINH))
9394           return;
9395         break;
9396       case LibFunc_cosh:
9397       case LibFunc_coshf:
9398       case LibFunc_coshl:
9399         if (visitUnaryFloatCall(I, ISD::FCOSH))
9400           return;
9401         break;
9402       case LibFunc_tanh:
9403       case LibFunc_tanhf:
9404       case LibFunc_tanhl:
9405         if (visitUnaryFloatCall(I, ISD::FTANH))
9406           return;
9407         break;
9408       case LibFunc_sqrt:
9409       case LibFunc_sqrtf:
9410       case LibFunc_sqrtl:
9411       case LibFunc_sqrt_finite:
9412       case LibFunc_sqrtf_finite:
9413       case LibFunc_sqrtl_finite:
9414         if (visitUnaryFloatCall(I, ISD::FSQRT))
9415           return;
9416         break;
9417       case LibFunc_floor:
9418       case LibFunc_floorf:
9419       case LibFunc_floorl:
9420         if (visitUnaryFloatCall(I, ISD::FFLOOR))
9421           return;
9422         break;
9423       case LibFunc_nearbyint:
9424       case LibFunc_nearbyintf:
9425       case LibFunc_nearbyintl:
9426         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
9427           return;
9428         break;
9429       case LibFunc_ceil:
9430       case LibFunc_ceilf:
9431       case LibFunc_ceill:
9432         if (visitUnaryFloatCall(I, ISD::FCEIL))
9433           return;
9434         break;
9435       case LibFunc_rint:
9436       case LibFunc_rintf:
9437       case LibFunc_rintl:
9438         if (visitUnaryFloatCall(I, ISD::FRINT))
9439           return;
9440         break;
9441       case LibFunc_round:
9442       case LibFunc_roundf:
9443       case LibFunc_roundl:
9444         if (visitUnaryFloatCall(I, ISD::FROUND))
9445           return;
9446         break;
9447       case LibFunc_trunc:
9448       case LibFunc_truncf:
9449       case LibFunc_truncl:
9450         if (visitUnaryFloatCall(I, ISD::FTRUNC))
9451           return;
9452         break;
9453       case LibFunc_log2:
9454       case LibFunc_log2f:
9455       case LibFunc_log2l:
9456         if (visitUnaryFloatCall(I, ISD::FLOG2))
9457           return;
9458         break;
9459       case LibFunc_exp2:
9460       case LibFunc_exp2f:
9461       case LibFunc_exp2l:
9462         if (visitUnaryFloatCall(I, ISD::FEXP2))
9463           return;
9464         break;
9465       case LibFunc_exp10:
9466       case LibFunc_exp10f:
9467       case LibFunc_exp10l:
9468         if (visitUnaryFloatCall(I, ISD::FEXP10))
9469           return;
9470         break;
9471       case LibFunc_ldexp:
9472       case LibFunc_ldexpf:
9473       case LibFunc_ldexpl:
9474         if (visitBinaryFloatCall(I, ISD::FLDEXP))
9475           return;
9476         break;
9477       case LibFunc_memcmp:
9478         if (visitMemCmpBCmpCall(I))
9479           return;
9480         break;
9481       case LibFunc_mempcpy:
9482         if (visitMemPCpyCall(I))
9483           return;
9484         break;
9485       case LibFunc_memchr:
9486         if (visitMemChrCall(I))
9487           return;
9488         break;
9489       case LibFunc_strcpy:
9490         if (visitStrCpyCall(I, false))
9491           return;
9492         break;
9493       case LibFunc_stpcpy:
9494         if (visitStrCpyCall(I, true))
9495           return;
9496         break;
9497       case LibFunc_strcmp:
9498         if (visitStrCmpCall(I))
9499           return;
9500         break;
9501       case LibFunc_strlen:
9502         if (visitStrLenCall(I))
9503           return;
9504         break;
9505       case LibFunc_strnlen:
9506         if (visitStrNLenCall(I))
9507           return;
9508         break;
9509       }
9510     }
9511   }
9512 
9513   if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
9514     LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), /*EHPadBB=*/nullptr);
9515     return;
9516   }
9517 
9518   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
9519   // have to do anything here to lower funclet bundles.
9520   // CFGuardTarget bundles are lowered in LowerCallTo.
9521   assert(!I.hasOperandBundlesOtherThan(
9522              {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
9523               LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
9524               LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi,
9525               LLVMContext::OB_convergencectrl}) &&
9526          "Cannot lower calls with arbitrary operand bundles!");
9527 
9528   SDValue Callee = getValue(I.getCalledOperand());
9529 
9530   if (I.hasDeoptState())
9531     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
9532   else
9533     // Check if we can potentially perform a tail call. More detailed checking
9534     // is done within LowerCallTo, after more information about the call is
9535     // known.
9536     LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
9537 }
9538 
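     // Lower a call with a "ptrauth" operand bundle, either as a direct call to
     // a compatible ptrauth constant, or as an authenticated indirect call.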
9539 void SelectionDAGBuilder::LowerCallSiteWithPtrAuthBundle(
9540     const CallBase &CB, const BasicBlock *EHPadBB) {
9541   auto PAB = CB.getOperandBundle("ptrauth");
9542   const Value *CalleeV = CB.getCalledOperand();
9543 
9544   // Gather the call ptrauth data from the operand bundle:
9545   //   [ i32 <key>, i64 <discriminator> ]
9546   const auto *Key = cast<ConstantInt>(PAB->Inputs[0]);
9547   const Value *Discriminator = PAB->Inputs[1];
9548 
9549   assert(Key->getType()->isIntegerTy(32) && "Invalid ptrauth key");
9550   assert(Discriminator->getType()->isIntegerTy(64) &&
9551          "Invalid ptrauth discriminator");
9552 
9553   // Look through ptrauth constants to find the raw callee.
9554   // Do a direct unauthenticated call if we found it and everything matches.
9555   if (const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CalleeV))
9556     if (CalleeCPA->isKnownCompatibleWith(Key, Discriminator,
9557                                          DAG.getDataLayout()))
9558       return LowerCallTo(CB, getValue(CalleeCPA->getPointer()), CB.isTailCall(),
9559                          CB.isMustTailCall(), EHPadBB);
9560 
9561   // Functions should never be ptrauth-called directly.
9562   assert(!isa<Function>(CalleeV) && "invalid direct ptrauth call");
9563 
9564   // Otherwise, do an authenticated indirect call.
9565   TargetLowering::PtrAuthInfo PAI = {Key->getZExtValue(),
9566                                      getValue(Discriminator)};
9567 
9568   LowerCallTo(CB, getValue(CalleeV), CB.isTailCall(), CB.isMustTailCall(),
9569               EHPadBB, &PAI);
9570 }
9571 
9572 namespace {
9573 
9574 /// AsmOperandInfo - This contains information for each constraint that we are
9575 /// lowering.
9576 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
9577 public:
9578   /// CallOperand - If this is the result output operand or a clobber
9579   /// this is null, otherwise it is the incoming operand to the CallInst.
9580   /// This gets modified as the asm is processed.
9581   SDValue CallOperand;
9582 
9583   /// AssignedRegs - If this is a register or register class operand, this
9584   /// contains the set of register corresponding to the operand.
9585   RegsForValue AssignedRegs;
9586 
9587   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
9588     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
9589   }
9590 
9591   /// Whether or not this operand accesses memory.
9592   bool hasMemory(const TargetLowering &TLI) const {
9593     // Indirect operands access memory.
9594     if (isIndirect)
9595       return true;
9596 
9597     for (const auto &Code : Codes)
9598       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
9599         return true;
9600 
9601     return false;
9602   }
9603 };
9604 
9606 } // end anonymous namespace
9607 
9608 /// Make sure that the output operand \p OpInfo and its corresponding input
9609 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
9610 /// out).
9611 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
9612                                SDISelAsmOperandInfo &MatchingOpInfo,
9613                                SelectionDAG &DAG) {
9614   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9615     return;
9616 
9617   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
9618   const auto &TLI = DAG.getTargetLoweringInfo();
9619 
9620   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9621       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
9622                                        OpInfo.ConstraintVT);
9623   std::pair<unsigned, const TargetRegisterClass *> InputRC =
9624       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
9625                                        MatchingOpInfo.ConstraintVT);
9626   const bool OutOpIsIntOrFP =
9627       OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9628   const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9629                              MatchingOpInfo.ConstraintVT.isFloatingPoint();
9630   if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9631     // FIXME: error out in a more elegant fashion
9632     report_fatal_error("Unsupported asm: input constraint"
9633                        " with a matching output constraint of"
9634                        " incompatible type!");
9635   }
9636   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9637 }
9638 
9639 /// Get a direct memory input to behave well as an indirect operand.
9640 /// This may introduce stores, hence the need for a \p Chain.
9641 /// \return The (possibly updated) chain.
9642 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
9643                                         SDISelAsmOperandInfo &OpInfo,
9644                                         SelectionDAG &DAG) {
9645   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9646 
9647   // If we don't have an indirect input, put it in the constpool if we can,
9648   // otherwise spill it to a stack slot.
9649   // TODO: This isn't quite right. We need to handle these according to
9650   // the addressing mode that the constraint wants. Also, this may take
9651   // an additional register for the computation and we don't want that
9652   // either.
9653 
9654   // If the operand is a float, integer, or vector constant, spill to a
9655   // constant pool entry to get its address.
9656   const Value *OpVal = OpInfo.CallOperandVal;
9657   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9658       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9659     OpInfo.CallOperand = DAG.getConstantPool(
9660         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
9661     return Chain;
9662   }
9663 
9664   // Otherwise, create a stack slot and emit a store to it before the asm.
9665   Type *Ty = OpVal->getType();
9666   auto &DL = DAG.getDataLayout();
9667   TypeSize TySize = DL.getTypeAllocSize(Ty);
9668   MachineFunction &MF = DAG.getMachineFunction();
9669   const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
9670   int StackID = 0;
9671   if (TySize.isScalable())
9672     StackID = TFI->getStackIDForScalableVectors();
9673   int SSFI = MF.getFrameInfo().CreateStackObject(TySize.getKnownMinValue(),
9674                                                  DL.getPrefTypeAlign(Ty), false,
9675                                                  nullptr, StackID);
9676   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
9677   Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9678                             MachinePointerInfo::getFixedStack(MF, SSFI),
9679                             TLI.getMemValueType(DL, Ty));
9680   OpInfo.CallOperand = StackSlot;
9681 
9682   return Chain;
9683 }
9684 
9685 /// GetRegistersForValue - Assign registers (virtual or physical) for the
9686 /// specified operand.  We prefer to assign virtual registers, to allow the
9687 /// register allocator to handle the assignment process.  However, if the asm
9688 /// uses features that we can't model on machineinstrs, we have SDISel do the
9689 /// allocation.  This produces generally horrible, but correct, code.
9690 ///
9691 ///   OpInfo describes the operand
9692 ///   RefOpInfo describes the matching operand if any, the operand otherwise
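     ///
     ///   Returns the mismatched physical register on failure, and std::nullopt
     ///   otherwise (including when no register assignment is needed).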
9693 static std::optional<unsigned>
9694 getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
9695                      SDISelAsmOperandInfo &OpInfo,
9696                      SDISelAsmOperandInfo &RefOpInfo) {
9697   LLVMContext &Context = *DAG.getContext();
9698   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9699 
9700   MachineFunction &MF = DAG.getMachineFunction();
9701   SmallVector<Register, 4> Regs;
9702   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9703 
9704   // No work to do for memory/address operands.
9705   if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9706       OpInfo.ConstraintType == TargetLowering::C_Address)
9707     return std::nullopt;
9708 
9709   // If this is a constraint for a single physreg, or a constraint for a
9710   // register class, find it.
9711   unsigned AssignedReg;
9712   const TargetRegisterClass *RC;
9713   std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
9714       &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9715   // RC is unset only on failure. Return immediately.
9716   if (!RC)
9717     return std::nullopt;
9718 
9719   // Get the actual register value type.  This is important, because the user
9720   // may have asked for (e.g.) the AX register in i32 type.  We need to
9721   // remember that AX is actually i16 to get the right extension.
9722   const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
9723 
9724   if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9725     // If this is an FP operand in an integer register (or vice versa), or more
9726     // generally if the operand value disagrees with the register class we plan
9727     // to stick it in, fix the operand type.
9728     //
9729     // If this is an input value, the bitcast to the new type is done now.
9730     // Bitcast for output value is done at the end of visitInlineAsm().
9731     if ((OpInfo.Type == InlineAsm::isOutput ||
9732          OpInfo.Type == InlineAsm::isInput) &&
9733         !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9734       // Try to convert to the first EVT that the reg class contains.  If the
9735       // types are identical size, use a bitcast to convert (e.g. two differing
9736       // vector types).  Note: output bitcast is done at the end of
9737       // visitInlineAsm().
9738       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9739         // Exclude indirect inputs while they are unsupported because the code
9740         // to perform the load is missing and thus OpInfo.CallOperand still
9741         // refers to the input address rather than the pointed-to value.
9742         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
9743           OpInfo.CallOperand =
9744               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
9745         OpInfo.ConstraintVT = RegVT;
9746         // If the operand is an FP value and we want it in integer registers,
9747         // use the corresponding integer type. This turns an f64 value into
9748         // i64, which can be passed with two i32 values on a 32-bit machine.
9749       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9750         MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
9751         if (OpInfo.Type == InlineAsm::isInput)
9752           OpInfo.CallOperand =
9753               DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
9754         OpInfo.ConstraintVT = VT;
9755       }
9756     }
9757   }
9758 
9759   // No need to allocate a matching input constraint since the constraint it's
9760   // matching to has already been allocated.
9761   if (OpInfo.isMatchingInputConstraint())
9762     return std::nullopt;
9763 
9764   EVT ValueVT = OpInfo.ConstraintVT;
9765   if (OpInfo.ConstraintVT == MVT::Other)
9766     ValueVT = RegVT;
9767 
9768   // Initialize NumRegs.
9769   unsigned NumRegs = 1;
9770   if (OpInfo.ConstraintVT != MVT::Other)
9771     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
9772 
9773   // If this is a constraint for a specific physical register, like {r17},
9774   // assign it now.
9775 
9776   // If this is associated with a specific register, initialize the iterator
9777   // to the correct place. If virtual, make sure we have enough registers.
9778 
9779   // Initialize iterator if necessary
9780   TargetRegisterClass::iterator I = RC->begin();
9781   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9782 
9783   // Do not check for single registers.
9784   if (AssignedReg) {
9785     I = std::find(I, RC->end(), AssignedReg);
9786     if (I == RC->end()) {
9787       // RC does not contain the selected register, which indicates a
9788       // mismatch between the register and the required type/bitwidth.
9789       return {AssignedReg};
9790     }
9791   }
9792 
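       // Take consecutive registers from RC starting at AssignedReg if a specific
       // physical register was requested; otherwise create fresh virtual registers.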
9793   for (; NumRegs; --NumRegs, ++I) {
9794     assert(I != RC->end() && "Ran out of registers to allocate!");
9795     Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
9796     Regs.push_back(R);
9797   }
9798 
9799   OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
9800   return std::nullopt;
9801 }
9802 
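     /// Return the index into \p AsmNodeOperands of the flag word that describes
     /// operand number \p OperandNo, skipping over the operand records emitted
     /// before it.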
9803 static unsigned
9804 findMatchingInlineAsmOperand(unsigned OperandNo,
9805                              const std::vector<SDValue> &AsmNodeOperands) {
9806   // Scan until we find the definition we already emitted of this operand.
9807   unsigned CurOp = InlineAsm::Op_FirstOperand;
9808   for (; OperandNo; --OperandNo) {
9809     // Advance to the next operand.
9810     unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9811     const InlineAsm::Flag F(OpFlag);
9812     assert(
9813         (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
9814         "Skipped past definitions?");
9815     CurOp += F.getNumOperandRegisters() + 1;
9816   }
9817   return CurOp;
9818 }
9819 
9820 namespace {
9821 
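     /// Accumulates the extra operand-info flags (side effects, align stack, asm
     /// dialect, convergent, may-load/may-store) that are encoded as an immediate
     /// operand of the INLINEASM node.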
9822 class ExtraFlags {
9823   unsigned Flags = 0;
9824 
9825 public:
9826   explicit ExtraFlags(const CallBase &Call) {
9827     const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9828     if (IA->hasSideEffects())
9829       Flags |= InlineAsm::Extra_HasSideEffects;
9830     if (IA->isAlignStack())
9831       Flags |= InlineAsm::Extra_IsAlignStack;
9832     if (Call.isConvergent())
9833       Flags |= InlineAsm::Extra_IsConvergent;
9834     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
9835   }
9836 
9837   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
9838     // Ideally, we would only check against memory constraints.  However, the
9839     // meaning of an Other constraint can be target-specific and we can't easily
9840     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
9841     // for Other constraints as well.
9842     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9843         OpInfo.ConstraintType == TargetLowering::C_Other) {
9844       if (OpInfo.Type == InlineAsm::isInput)
9845         Flags |= InlineAsm::Extra_MayLoad;
9846       else if (OpInfo.Type == InlineAsm::isOutput)
9847         Flags |= InlineAsm::Extra_MayStore;
9848       else if (OpInfo.Type == InlineAsm::isClobber)
9849         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
9850     }
9851   }
9852 
9853   unsigned get() const { return Flags; }
9854 };
9855 
9856 } // end anonymous namespace
9857 
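     /// Return true if \p Op is a GlobalAddress referring to a Function that is
     /// not dllimport-ed (see the comment below on why dllimport is excluded).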
9858 static bool isFunction(SDValue Op) {
9859   if (Op && Op.getOpcode() == ISD::GlobalAddress) {
9860     if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9861       auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9862 
9863       // A normal "call dllimport func" instruction (non-inline-asm) forces
9864       // indirect access via its call opcode, and the asm printer usually emits
9865       // an indirect symbol (e.g. "*") accordingly. Inline asm cannot do that
9866       // yet (in fact, this is similar to the "Data Access" case), so we ignore
9867       // dllimport functions here.
9868       if (Fn && !Fn->hasDLLImportStorageClass())
9869         return true;
9870     }
9871   }
9872   return false;
9873 }
9874 
9875 /// visitInlineAsm - Handle a call to an InlineAsm object.
9876 void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
9877                                          const BasicBlock *EHPadBB) {
9878   const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9879 
9880   /// ConstraintOperands - Information about all of the constraints.
9881   SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
9882 
9883   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9884   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
9885       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
9886 
9887   // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
9888   // AsmDialect, MayLoad, MayStore).
9889   bool HasSideEffect = IA->hasSideEffects();
9890   ExtraFlags ExtraInfo(Call);
9891 
9892   for (auto &T : TargetConstraints) {
9893     ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
9894     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
9895 
9896     if (OpInfo.CallOperandVal)
9897       OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
9898 
9899     if (!HasSideEffect)
9900       HasSideEffect = OpInfo.hasMemory(TLI);
9901 
9902     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
9903     // FIXME: Could we compute this on OpInfo rather than T?
9904 
9905     // Compute the constraint code and ConstraintType to use.
9906     TLI.ComputeConstraintToUse(T, SDValue());
9907 
9908     if (T.ConstraintType == TargetLowering::C_Immediate &&
9909         OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
9910       // We've delayed emitting a diagnostic (e.g. for the "n" constraint)
9911       // because inlining could cause an integer constant to show up.
9912       return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
9913                                           "' expects an integer constant "
9914                                           "expression");
9915 
9916     ExtraInfo.update(T);
9917   }
9918 
9919   // We won't need to flush pending loads if this asm doesn't touch
9920   // memory and is nonvolatile.
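       // (getRoot() flushes the PendingLoads into the root; DAG.getRoot() does
       // not.)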
9921   SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
9922 
9923   bool EmitEHLabels = isa<InvokeInst>(Call);
9924   if (EmitEHLabels) {
9925     assert(EHPadBB && "InvokeInst must have an EHPadBB");
9926   }
9927   bool IsCallBr = isa<CallBrInst>(Call);
9928 
9929   if (IsCallBr || EmitEHLabels) {
9930     // If this is a callbr or invoke we need to flush pending exports since
9931     // inlineasm_br and invoke are terminators.
9932     // We need to do this before nodes are glued to the inlineasm_br node.
9933     Chain = getControlRoot();
9934   }
9935 
9936   MCSymbol *BeginLabel = nullptr;
9937   if (EmitEHLabels) {
9938     Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
9939   }
9940 
9941   int OpNo = -1;
9942   SmallVector<StringRef> AsmStrs;
9943   IA->collectAsmStrs(AsmStrs);
9944 
9945   // Second pass over the constraints: compute which constraint option to use.
9946   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9947     if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
9948       OpNo++;
9949 
9950     // If this is an output operand with a matching input operand, look up the
9951     // matching input. If their types mismatch, e.g. one is an integer, the
9952     // other is floating point, or their sizes are different, flag it as an
9953     // error.
9954     if (OpInfo.hasMatchingInput()) {
9955       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
9956       patchMatchingInput(OpInfo, Input, DAG);
9957     }
9958 
9959     // Compute the constraint code and ConstraintType to use.
9960     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
9961 
9962     if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
9963          OpInfo.Type == InlineAsm::isClobber) ||
9964         OpInfo.ConstraintType == TargetLowering::C_Address)
9965       continue;
9966 
9967     // In Linux PIC model, there are 4 cases about value/label addressing:
9968     //
9969     // 1: Function call or Label jmp inside the module.
9970     // 2: Data access (such as global variable, static variable) inside module.
9971     // 3: Function call or Label jmp outside the module.
9972     // 4: Data access (such as global variable) outside the module.
9973     //
9974     // Because the current LLVM inline asm architecture is designed not to
9975     // "recognize" the asm code, it is quite hard to treat memory addressing
9976     // differently for the same value/address used in different instructions.
9977     // For example, in the PIC model, a function call may go through the PLT
9978     // or be directly PC-relative, while a lea/mov of a function address may
9979     // use the GOT.
9980     //
9981     // Here we try to "recognize" function calls for case 1 and case 3 in
9982     // inline asm, and adjust the constraint for them.
9983     //
9984     // TODO: Outside-module label jumps are discouraged in inline asm, so
9985     // we don't handle them yet; enhance this (especially for PIC) if needed.
9986     if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
9987         TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
9988         TM.getCodeModel() != CodeModel::Large) {
9989       OpInfo.isIndirect = false;
9990       OpInfo.ConstraintType = TargetLowering::C_Address;
9991     }
9992 
9993     // If this is a memory input, and if the operand is not indirect, do what we
9994     // need to provide an address for the memory input.
9995     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
9996         !OpInfo.isIndirect) {
9997       assert((OpInfo.isMultipleAlternative ||
9998               (OpInfo.Type == InlineAsm::isInput)) &&
9999              "Can only indirectify direct input operands!");
10000 
10001       // Memory operands really want the address of the value.
10002       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
10003 
10004       // There is no longer a Value* corresponding to this operand.
10005       OpInfo.CallOperandVal = nullptr;
10006 
10007       // It is now an indirect operand.
10008       OpInfo.isIndirect = true;
10009     }
10011   }
10012 
10013   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
10014   std::vector<SDValue> AsmNodeOperands;
10015   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
10016   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
10017       IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
10018 
10019   // If we have a !srcloc metadata node associated with it, we want to attach
10020   // this to the ultimately generated inline asm machineinstr.  To do this, we
10021   // pass in the third operand as this (potentially null) inline asm MDNode.
10022   const MDNode *SrcLoc = Call.getMetadata("srcloc");
10023   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
10024 
10025   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
10026   // bits as operand 3.
10027   AsmNodeOperands.push_back(DAG.getTargetConstant(
10028       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
10029 
10030   // Third pass: Loop over operands to prepare DAG-level operands. As part of
10031   // this, assign virtual and physical registers for inputs and outputs.
10032   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10033     // Assign Registers.
10034     SDISelAsmOperandInfo &RefOpInfo =
10035         OpInfo.isMatchingInputConstraint()
10036             ? ConstraintOperands[OpInfo.getMatchedOperand()]
10037             : OpInfo;
10038     const auto RegError =
10039         getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
10040     if (RegError) {
10041       const MachineFunction &MF = DAG.getMachineFunction();
10042       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
10043       const char *RegName = TRI.getName(*RegError);
10044       emitInlineAsmError(Call, "register '" + Twine(RegName) +
10045                                    "' allocated for constraint '" +
10046                                    Twine(OpInfo.ConstraintCode) +
10047                                    "' does not match required type");
10048       return;
10049     }
10050 
10051     auto DetectWriteToReservedRegister = [&]() {
10052       const MachineFunction &MF = DAG.getMachineFunction();
10053       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
10054       for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
10055         if (Register::isPhysicalRegister(Reg) &&
10056             TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
10057           const char *RegName = TRI.getName(Reg);
10058           emitInlineAsmError(Call, "write to reserved register '" +
10059                                        Twine(RegName) + "'");
10060           return true;
10061         }
10062       }
10063       return false;
10064     };
10065     assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
10066             (OpInfo.Type == InlineAsm::isInput &&
10067              !OpInfo.isMatchingInputConstraint())) &&
10068            "Only address as input operand is allowed.");
10069 
10070     switch (OpInfo.Type) {
10071     case InlineAsm::isOutput:
10072       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
10073         const InlineAsm::ConstraintCode ConstraintID =
10074             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10075         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
10076                "Failed to convert memory constraint code to constraint id.");
10077 
10078         // Add information to the INLINEASM node to know about this output.
10079         InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
10080         OpFlags.setMemConstraint(ConstraintID);
10081         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
10082                                                         MVT::i32));
10083         AsmNodeOperands.push_back(OpInfo.CallOperand);
10084       } else {
10085         // Otherwise, this outputs to a register (directly for C_Register /
10086         // C_RegisterClass, and a target-defined fashion for
10087         // C_Immediate/C_Other). Find a register that we can use.
10088         if (OpInfo.AssignedRegs.Regs.empty()) {
10089           emitInlineAsmError(
10090               Call, "couldn't allocate output register for constraint '" +
10091                         Twine(OpInfo.ConstraintCode) + "'");
10092           return;
10093         }
10094 
10095         if (DetectWriteToReservedRegister())
10096           return;
10097 
10098         // Add information to the INLINEASM node to know that this register is
10099         // set.
10100         OpInfo.AssignedRegs.AddInlineAsmOperands(
10101             OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
10102                                   : InlineAsm::Kind::RegDef,
10103             false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
10104       }
10105       break;
10106 
10107     case InlineAsm::isInput:
10108     case InlineAsm::isLabel: {
10109       SDValue InOperandVal = OpInfo.CallOperand;
10110 
10111       if (OpInfo.isMatchingInputConstraint()) {
10112         // If this is required to match an output register we have already set,
10113         // just use its register.
10114         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
10115                                                   AsmNodeOperands);
10116         InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10117         if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
10118           if (OpInfo.isIndirect) {
10119             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
10120             emitInlineAsmError(Call, "inline asm not supported yet: "
10121                                      "don't know how to handle tied "
10122                                      "indirect register inputs");
10123             return;
10124           }
10125 
10126           SmallVector<Register, 4> Regs;
10127           MachineFunction &MF = DAG.getMachineFunction();
10128           MachineRegisterInfo &MRI = MF.getRegInfo();
10129           const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
10130           auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
10131           Register TiedReg = R->getReg();
10132           MVT RegVT = R->getSimpleValueType(0);
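                // Select a register class compatible with the tied register.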
10133           const TargetRegisterClass *RC =
10134               TiedReg.isVirtual()     ? MRI.getRegClass(TiedReg)
10135               : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
10136                                       : TRI.getMinimalPhysRegClass(TiedReg);
10137           for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i)
10138             Regs.push_back(MRI.createVirtualRegister(RC));
10139 
10140           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
10141 
10142           SDLoc dl = getCurSDLoc();
10143           // Use MatchedRegs to copy the input value into the new registers.
10144           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, &Call);
10145           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, true,
10146                                            OpInfo.getMatchedOperand(), dl, DAG,
10147                                            AsmNodeOperands);
10148           break;
10149         }
10150 
10151         assert(Flag.isMemKind() && "Unknown matching constraint!");
10152         assert(Flag.getNumOperandRegisters() == 1 &&
10153                "Unexpected number of operands");
10154         // Add information to the INLINEASM node to know about this input.
10155         // See InlineAsm.h isUseOperandTiedToDef.
10156         Flag.clearMemConstraint();
10157         Flag.setMatchingOp(OpInfo.getMatchedOperand());
10158         AsmNodeOperands.push_back(DAG.getTargetConstant(
10159             Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
10160         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10161         break;
10162       }
10163 
10164       // Treat indirect 'X' constraint as memory.
10165       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
10166           OpInfo.isIndirect)
10167         OpInfo.ConstraintType = TargetLowering::C_Memory;
10168 
10169       if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
10170           OpInfo.ConstraintType == TargetLowering::C_Other) {
10171         std::vector<SDValue> Ops;
10172         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
10173                                           Ops, DAG);
10174         if (Ops.empty()) {
10175           if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
10176             if (isa<ConstantSDNode>(InOperandVal)) {
10177               emitInlineAsmError(Call, "value out of range for constraint '" +
10178                                            Twine(OpInfo.ConstraintCode) + "'");
10179               return;
10180             }
10181 
10182           emitInlineAsmError(Call,
10183                              "invalid operand for inline asm constraint '" +
10184                                  Twine(OpInfo.ConstraintCode) + "'");
10185           return;
10186         }
10187 
10188         // Add information to the INLINEASM node to know about this input.
10189         InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size());
10190         AsmNodeOperands.push_back(DAG.getTargetConstant(
10191             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
10192         llvm::append_range(AsmNodeOperands, Ops);
10193         break;
10194       }
10195 
10196       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
10197         // The constraint type is known to be C_Memory here, so the
10198         // operand must be indirect.
10199         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
10200         assert(InOperandVal.getValueType() ==
10201                    TLI.getPointerTy(DAG.getDataLayout()) &&
10202                "Memory operands expect pointer values");
10203 
10204         const InlineAsm::ConstraintCode ConstraintID =
10205             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10206         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
10207                "Failed to convert memory constraint code to constraint id.");
10208 
10209         // Add information to the INLINEASM node to know about this input.
10210         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
10211         ResOpType.setMemConstraint(ConstraintID);
10212         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
10213                                                         getCurSDLoc(),
10214                                                         MVT::i32));
10215         AsmNodeOperands.push_back(InOperandVal);
10216         break;
10217       }
10218 
10219       if (OpInfo.ConstraintType == TargetLowering::C_Address) {
10220         const InlineAsm::ConstraintCode ConstraintID =
10221             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10222         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
10223                "Failed to convert memory constraint code to constraint id.");
10224 
10225         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
10226 
10227         SDValue AsmOp = InOperandVal;
10228         if (isFunction(InOperandVal)) {
10229           auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
10230           ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1);
10231           AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
10232                                              InOperandVal.getValueType(),
10233                                              GA->getOffset());
10234         }
10235 
10236         // Add information to the INLINEASM node to know about this input.
10237         ResOpType.setMemConstraint(ConstraintID);
10238 
10239         AsmNodeOperands.push_back(
10240             DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32));
10241 
10242         AsmNodeOperands.push_back(AsmOp);
10243         break;
10244       }
10245 
10246       if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
10247           OpInfo.ConstraintType != TargetLowering::C_Register) {
10248         emitInlineAsmError(Call, "unknown asm constraint '" +
10249                                      Twine(OpInfo.ConstraintCode) + "'");
10250         return;
10251       }
10252 
10253       // TODO: Support this.
10254       if (OpInfo.isIndirect) {
10255         emitInlineAsmError(
10256             Call, "don't know how to handle indirect register inputs yet "
10257                   "for constraint '" +
10258                       Twine(OpInfo.ConstraintCode) + "'");
10259         return;
10260       }
10261 
10262       // Copy the input into the appropriate registers.
10263       if (OpInfo.AssignedRegs.Regs.empty()) {
10264         emitInlineAsmError(Call,
10265                            "couldn't allocate input reg for constraint '" +
10266                                Twine(OpInfo.ConstraintCode) + "'");
10267         return;
10268       }
10269 
10270       if (DetectWriteToReservedRegister())
10271         return;
10272 
10273       SDLoc dl = getCurSDLoc();
10274 
10275       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue,
10276                                         &Call);
10277 
10278       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, false,
10279                                                0, dl, DAG, AsmNodeOperands);
10280       break;
10281     }
10282     case InlineAsm::isClobber:
10283       // Add the clobbered value to the operand list, so that the register
10284       // allocator is aware that the physreg got clobbered.
10285       if (!OpInfo.AssignedRegs.Regs.empty())
10286         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::Clobber,
10287                                                  false, 0, getCurSDLoc(), DAG,
10288                                                  AsmNodeOperands);
10289       break;
10290     }
10291   }
10292 
10293   // Finish up input operands.  Set the input chain and add the flag last.
10294   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
10295   if (Glue.getNode()) AsmNodeOperands.push_back(Glue);
10296 
10297   unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
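        // At this point AsmNodeOperands holds the fixed prefix (input chain,
        // asm string, !srcloc metadata, extra-info flags) followed by the
        // per-operand flag/value pairs built above, with the glue operand
        // last when present.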
10298   Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
10299                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10300   Glue = Chain.getValue(1);
10301 
10302   // Do additional work to generate outputs.
10303 
10304   SmallVector<EVT, 1> ResultVTs;
10305   SmallVector<SDValue, 1> ResultValues;
10306   SmallVector<SDValue, 8> OutChains;
10307 
10308   llvm::Type *CallResultType = Call.getType();
10309   ArrayRef<Type *> ResultTypes;
10310   if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
10311     ResultTypes = StructResult->elements();
10312   else if (!CallResultType->isVoidTy())
10313     ResultTypes = ArrayRef(CallResultType);
10314 
10315   auto CurResultType = ResultTypes.begin();
10316   auto handleRegAssign = [&](SDValue V) {
10317     assert(CurResultType != ResultTypes.end() && "Unexpected value");
10318     assert((*CurResultType)->isSized() && "Unexpected unsized type");
10319     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
10320     ++CurResultType;
10321     // If the type of the inline asm call site return value is different
10322     // from, but has the same size as, the type of the asm output, bitcast
10323     // it.  One example of this is vectors with a different width / number
10324     // of elements.  This can happen for register classes that can contain
10325     // multiple different value types.  The preg or vreg allocated may not
10326     // have the same VT as was expected.
10327     //
10328     // This can also happen for a return value that disagrees with the register
10329     // class it is put in, eg. a double in a general-purpose register on a
10330     // 32-bit machine.
10331     if (ResultVT != V.getValueType() &&
10332         ResultVT.getSizeInBits() == V.getValueSizeInBits())
10333       V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
10334     else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
10335              V.getValueType().isInteger()) {
10336       // If a result value was tied to an input value, the computed result
10337       // may have a wider width than the expected result.  Extract the
10338       // relevant portion.
10339       V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
10340     }
10341     assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
10342     ResultVTs.push_back(ResultVT);
10343     ResultValues.push_back(V);
10344   };
10345 
10346   // Deal with output operands.
10347   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10348     if (OpInfo.Type == InlineAsm::isOutput) {
10349       SDValue Val;
10350       // Skip trivial output operands.
10351       if (OpInfo.AssignedRegs.Regs.empty())
10352         continue;
10353 
10354       switch (OpInfo.ConstraintType) {
10355       case TargetLowering::C_Register:
10356       case TargetLowering::C_RegisterClass:
10357         Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
10358                                                   Chain, &Glue, &Call);
10359         break;
10360       case TargetLowering::C_Immediate:
10361       case TargetLowering::C_Other:
10362         Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
10363                                               OpInfo, DAG);
10364         break;
10365       case TargetLowering::C_Memory:
10366         break; // Already handled.
10367       case TargetLowering::C_Address:
10368         break; // Silence warning.
10369       case TargetLowering::C_Unknown:
10370         assert(false && "Unexpected unknown constraint");
10371       }
10372 
10373       // Indirect outputs manifest as stores. Record the output chains.
10374       if (OpInfo.isIndirect) {
10375         const Value *Ptr = OpInfo.CallOperandVal;
10376         assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
10377         SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
10378                                      MachinePointerInfo(Ptr));
10379         OutChains.push_back(Store);
10380       } else {
10381         // Record the register outputs as results of the asm call.
10382         assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
10383         if (Val.getOpcode() == ISD::MERGE_VALUES) {
10384           for (const SDValue &V : Val->op_values())
10385             handleRegAssign(V);
10386         } else
10387           handleRegAssign(Val);
10388       }
10389     }
10390   }
10391 
10392   // Set results.
10393   if (!ResultValues.empty()) {
10394     assert(CurResultType == ResultTypes.end() &&
10395            "Mismatch in number of ResultTypes");
10396     assert(ResultValues.size() == ResultTypes.size() &&
10397            "Mismatch in number of output operands in asm result");
10398 
10399     SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
10400                             DAG.getVTList(ResultVTs), ResultValues);
10401     setValue(&Call, V);
10402   }
10403 
10404   // Collect store chains.
10405   if (!OutChains.empty())
10406     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
10407 
10408   if (EmitEHLabels) {
10409     Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
10410   }
10411 
10412   // Only Update Root if inline assembly has a memory effect.
10413   // Only update the root if the inline assembly has a memory effect.
10414       EmitEHLabels)
10415     DAG.setRoot(Chain);
10416 }
10417 
10418 void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
10419                                              const Twine &Message) {
10420   LLVMContext &Ctx = *DAG.getContext();
10421   Ctx.emitError(&Call, Message);
10422 
10423   // Make sure we leave the DAG in a valid state
10424   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10425   SmallVector<EVT, 1> ValueVTs;
10426   ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
10427 
10428   if (ValueVTs.empty())
10429     return;
10430 
10431   SmallVector<SDValue, 1> Ops;
10432   for (const EVT &VT : ValueVTs)
10433     Ops.push_back(DAG.getUNDEF(VT));
10434 
10435   setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
10436 }
10437 
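      /// As an illustration, visitVAStart lowers, e.g.,
      ///   call void @llvm.va_start(ptr %ap)
      /// to a single VASTART node carrying the chain, the pointer %ap, and a
      /// source-value annotation used by alias analysis.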
10438 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
10439   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
10440                           MVT::Other, getRoot(),
10441                           getValue(I.getArgOperand(0)),
10442                           DAG.getSrcValue(I.getArgOperand(0))));
10443 }
10444 
10445 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
10446   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10447   const DataLayout &DL = DAG.getDataLayout();
10448   SDValue V = DAG.getVAArg(
10449       TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
10450       getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
10451       DL.getABITypeAlign(I.getType()).value());
10452   DAG.setRoot(V.getValue(1));
10453 
10454   if (I.getType()->isPointerTy())
10455     V = DAG.getPtrExtOrTrunc(
10456         V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
10457   setValue(&I, V);
10458 }
10459 
10460 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
10461   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
10462                           MVT::Other, getRoot(),
10463                           getValue(I.getArgOperand(0)),
10464                           DAG.getSrcValue(I.getArgOperand(0))));
10465 }
10466 
10467 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
10468   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
10469                           MVT::Other, getRoot(),
10470                           getValue(I.getArgOperand(0)),
10471                           getValue(I.getArgOperand(1)),
10472                           DAG.getSrcValue(I.getArgOperand(0)),
10473                           DAG.getSrcValue(I.getArgOperand(1))));
10474 }
10475 
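      /// If \p I has a known value range with an unsigned minimum of zero,
      /// wrap \p Op in an AssertZext of the narrowest integer type covering
      /// the range; e.g. a range of [0, 256) yields an AssertZext annotated
      /// with i8.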
10476 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
10477                                                     const Instruction &I,
10478                                                     SDValue Op) {
10479   std::optional<ConstantRange> CR = getRange(I);
10480 
10481   if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10482     return Op;
10483 
10484   APInt Lo = CR->getUnsignedMin();
10485   if (!Lo.isMinValue())
10486     return Op;
10487 
10488   APInt Hi = CR->getUnsignedMax();
10489   unsigned Bits = std::max(Hi.getActiveBits(),
10490                            static_cast<unsigned>(IntegerType::MIN_INT_BITS));
10491 
10492   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
10493 
10494   SDLoc SL = getCurSDLoc();
10495 
10496   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
10497                              DAG.getValueType(SmallVT));
10498   unsigned NumVals = Op.getNode()->getNumValues();
10499   if (NumVals == 1)
10500     return ZExt;
10501 
10502   SmallVector<SDValue, 4> Ops;
10503 
10504   Ops.push_back(ZExt);
10505   for (unsigned I = 1; I != NumVals; ++I)
10506     Ops.push_back(Op.getValue(I));
10507 
10508   return DAG.getMergeValues(Ops, SL);
10509 }
10510 
10511 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
10512 /// the call being lowered.
10513 ///
10514 /// This is a helper for lowering intrinsics that follow a target calling
10515 /// convention or require stack pointer adjustment. Only a subset of the
10516 /// intrinsic's operands need to participate in the calling convention.
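      /// visitPatchpoint, for example, routes only the <numArgs> actual call
      /// arguments through here, skipping the intrinsic's meta-operands.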
10517 void SelectionDAGBuilder::populateCallLoweringInfo(
10518     TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
10519     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
10520     AttributeSet RetAttrs, bool IsPatchPoint) {
10521   TargetLowering::ArgListTy Args;
10522   Args.reserve(NumArgs);
10523 
10524   // Populate the argument list.
10525   // Attributes for args start at offset 1, after the return attribute.
10526   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10527        ArgI != ArgE; ++ArgI) {
10528     const Value *V = Call->getOperand(ArgI);
10529 
10530     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
10531 
10532     TargetLowering::ArgListEntry Entry;
10533     Entry.Node = getValue(V);
10534     Entry.Ty = V->getType();
10535     Entry.setAttributes(Call, ArgI);
10536     Args.push_back(Entry);
10537   }
10538 
10539   CLI.setDebugLoc(getCurSDLoc())
10540       .setChain(getRoot())
10541       .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10542                  RetAttrs)
10543       .setDiscardResult(Call->use_empty())
10544       .setIsPatchPoint(IsPatchPoint)
10545       .setIsPreallocated(
10546           Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
10547 }
10548 
10549 /// Add a stack map intrinsic call's live variable operands to a stackmap
10550 /// or patchpoint target node's operand list.
10551 ///
10552 /// Constants are converted to TargetConstants purely as an optimization to
10553 /// avoid constant materialization and register allocation.
10554 ///
10555 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
10556 /// generate address computation nodes, and so FinalizeISel can convert the
10557 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
10558 /// address materialization and register allocation, but may also be required
10559 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
10560 /// alloca in the entry block, then the runtime may assume that the alloca's
10561 /// StackMap location can be read immediately after compilation and that the
10562 /// location is valid at any point during execution (this is similar to the
10563 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
10564 /// only available in a register, then the runtime would need to trap when
10565 /// execution reaches the StackMap in order to read the alloca's location.
10566 static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
10567                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
10568                                 SelectionDAGBuilder &Builder) {
10569   SelectionDAG &DAG = Builder.DAG;
10570   for (unsigned I = StartIdx; I < Call.arg_size(); I++) {
10571     SDValue Op = Builder.getValue(Call.getArgOperand(I));
10572 
10573     // Things on the stack are pointer-typed, meaning that they are already
10574     // legal and can be emitted directly to target nodes.
10575     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
10576       Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
10577     } else {
10578       // Otherwise emit a target independent node to be legalised.
10579       Ops.push_back(Builder.getValue(Call.getArgOperand(I)));
10580     }
10581   }
10582 }
10583 
10584 /// Lower llvm.experimental.stackmap.
10585 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
10586   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
10587   //                                  [live variables...])
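        //
        // For example:
        //   call void @llvm.experimental.stackmap(i64 42, i32 8, ptr %obj)
        // records the location of %obj under ID 42 and pads the shadow with
        // up to 8 bytes of nops if needed.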
10588 
10589   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
10590 
10591   SDValue Chain, InGlue, Callee;
10592   SmallVector<SDValue, 32> Ops;
10593 
10594   SDLoc DL = getCurSDLoc();
10595   Callee = getValue(CI.getCalledOperand());
10596 
10597   // The stackmap intrinsic only records the live variables (the arguments
10598   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
10599   // intrinsic, this won't be lowered to a function call. This means we don't
10600   // have to worry about calling conventions and target specific lowering code.
10601   // Instead we perform the call lowering right here.
10602   //
10603   // chain, flag = CALLSEQ_START(chain, 0, 0)
10604   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
10605   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
10606   //
10607   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
10608   InGlue = Chain.getValue(1);
10609 
10610   // Add the STACKMAP operands, starting with DAG housekeeping.
10611   Ops.push_back(Chain);
10612   Ops.push_back(InGlue);
10613 
10614   // Add the <id>, <numShadowBytes> operands.
10615   //
10616   // These do not require legalisation, and can be emitted directly to target
10617   // constant nodes.
10618   SDValue ID = getValue(CI.getArgOperand(0));
10619   assert(ID.getValueType() == MVT::i64);
10620   SDValue IDConst =
10621       DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType());
10622   Ops.push_back(IDConst);
10623 
10624   SDValue Shad = getValue(CI.getArgOperand(1));
10625   assert(Shad.getValueType() == MVT::i32);
10626   SDValue ShadConst =
10627       DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType());
10628   Ops.push_back(ShadConst);
10629 
10630   // Add the live variables.
10631   addStackMapLiveVars(CI, 2, DL, Ops, *this);
10632 
10633   // Create the STACKMAP node.
10634   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10635   Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
10636   InGlue = Chain.getValue(1);
10637 
10638   Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
10639 
10640   // Stackmaps don't generate values, so nothing goes into the NodeMap.
10641 
10642   // Set the root to the target-lowered call chain.
10643   DAG.setRoot(Chain);
10644 
10645   // Inform the Frame Information that we have a stackmap in this function.
10646   FuncInfo.MF->getFrameInfo().setHasStackMap();
10647 }
10648 
10649 /// Lower llvm.experimental.patchpoint directly to its target opcode.
10650 void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
10651                                           const BasicBlock *EHPadBB) {
10652   // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
10653   //                                         i32 <numBytes>,
10654   //                                         i8* <target>,
10655   //                                         i32 <numArgs>,
10656   //                                         [Args...],
10657   //                                         [live variables...])
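        //
        // For example:
        //   %r = call i64 @llvm.experimental.patchpoint.i64(i64 7, i32 15,
        //                    ptr @target, i32 2, i64 %a, i64 %b)
        // emits a 15-byte patchable region around a call to @target with two
        // call arguments; any operands after those would be recorded as live
        // variables.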
10658 
10659   CallingConv::ID CC = CB.getCallingConv();
10660   bool IsAnyRegCC = CC == CallingConv::AnyReg;
10661   bool HasDef = !CB.getType()->isVoidTy();
10662   SDLoc dl = getCurSDLoc();
10663   SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
10664 
10665   // Handle immediate and symbolic callees.
10666   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10667     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10668                                    /*isTarget=*/true);
10669   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10670     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10671                                          SDLoc(SymbolicCallee),
10672                                          SymbolicCallee->getValueType(0));
10673 
10674   // Get the real number of arguments participating in the call, <numArgs>.
10675   SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
10676   unsigned NumArgs = NArgVal->getAsZExtVal();
10677 
10678   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
10679   // Intrinsics include all meta-operands up to but not including CC.
10680   unsigned NumMetaOpers = PatchPointOpers::CCPos;
10681   assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
10682          "Not enough arguments provided to the patchpoint intrinsic");
10683 
10684   // For AnyRegCC the arguments are lowered later on manually.
10685   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10686   Type *ReturnTy =
10687       IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
10688 
10689   TargetLowering::CallLoweringInfo CLI(DAG);
10690   populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
10691                            ReturnTy, CB.getAttributes().getRetAttrs(), true);
10692   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
10693 
10694   SDNode *CallEnd = Result.second.getNode();
10695   if (CallEnd->getOpcode() == ISD::EH_LABEL)
10696     CallEnd = CallEnd->getOperand(0).getNode();
10697   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
10698     CallEnd = CallEnd->getOperand(0).getNode();
10699 
10700   // Get a call instruction from the call sequence chain.
10701   // Tail calls are not allowed.
10702   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
10703          "Expected a callseq node.");
10704   SDNode *Call = CallEnd->getOperand(0).getNode();
10705   bool HasGlue = Call->getGluedNode();
10706 
10707   // Replace the target specific call node with the patchable intrinsic.
10708   SmallVector<SDValue, 8> Ops;
10709 
10710   // Push the chain.
10711   Ops.push_back(*(Call->op_begin()));
10712 
10713   // Optionally, push the glue (if any).
10714   if (HasGlue)
10715     Ops.push_back(*(Call->op_end() - 1));
10716 
10717   // Push the register mask info.
10718   if (HasGlue)
10719     Ops.push_back(*(Call->op_end() - 2));
10720   else
10721     Ops.push_back(*(Call->op_end() - 1));
10722 
10723   // Add the <id> and <numBytes> constants.
10724   SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
10725   Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64));
10726   SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
10727   Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32));
10728 
10729   // Add the callee.
10730   Ops.push_back(Callee);
10731 
10732   // Adjust <numArgs> to account for any arguments that have been passed on the
10733   // stack instead.
10734   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
10735   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
10736   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10737   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10738 
10739   // Add the calling convention
10740   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
10741 
10742   // Add the arguments we omitted previously. The register allocator should
10743   // place these in any free register.
10744   if (IsAnyRegCC)
10745     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
10746       Ops.push_back(getValue(CB.getArgOperand(i)));
10747 
10748   // Push the arguments from the call instruction.
10749   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
10750   Ops.append(Call->op_begin() + 2, e);
10751 
10752   // Push live variables for the stack map.
10753   addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
10754 
10755   SDVTList NodeTys;
10756   if (IsAnyRegCC && HasDef) {
10757     // Create the return types based on the intrinsic definition
10758     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10759     SmallVector<EVT, 3> ValueVTs;
10760     ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
10761     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
10762 
10763     // There is always a chain and a glue type at the end
10764     ValueVTs.push_back(MVT::Other);
10765     ValueVTs.push_back(MVT::Glue);
10766     NodeTys = DAG.getVTList(ValueVTs);
10767   } else
10768     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10769 
10770   // Replace the target specific call node with a PATCHPOINT node.
10771   SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);
10772 
10773   // Update the NodeMap.
10774   if (HasDef) {
10775     if (IsAnyRegCC)
10776       setValue(&CB, SDValue(PPV.getNode(), 0));
10777     else
10778       setValue(&CB, Result.first);
10779   }
10780 
10781   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
10782   // call sequence. Furthermore the location of the chain and glue can change
10783   // when the AnyReg calling convention is used and the intrinsic returns a
10784   // value.
10785   if (IsAnyRegCC && HasDef) {
10786     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
10787     SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
10788     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10789   } else
10790     DAG.ReplaceAllUsesWith(Call, PPV.getNode());
10791   DAG.DeleteNode(Call);
10792 
10793   // Inform the Frame Information that we have a patchpoint in this function.
10794   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10795 }
10796 
10797 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
10798                                             unsigned Intrinsic) {
10799   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10800   SDValue Op1 = getValue(I.getArgOperand(0));
10801   SDValue Op2;
10802   if (I.arg_size() > 1)
10803     Op2 = getValue(I.getArgOperand(1));
10804   SDLoc dl = getCurSDLoc();
10805   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
10806   SDValue Res;
10807   SDNodeFlags SDFlags;
10808   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
10809     SDFlags.copyFMF(*FPMO);
10810 
10811   switch (Intrinsic) {
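        // For the floating-point reductions, a 'reassoc' fast-math flag
        // selects the unordered VECREDUCE_* form (with the start value folded
        // in by a scalar op); otherwise the strictly ordered VECREDUCE_SEQ_*
        // form is used.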
10812   case Intrinsic::vector_reduce_fadd:
10813     if (SDFlags.hasAllowReassociation())
10814       Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
10815                         DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10816                         SDFlags);
10817     else
10818       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10819     break;
10820   case Intrinsic::vector_reduce_fmul:
10821     if (SDFlags.hasAllowReassociation())
10822       Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
10823                         DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10824                         SDFlags);
10825     else
10826       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10827     break;
10828   case Intrinsic::vector_reduce_add:
10829     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10830     break;
10831   case Intrinsic::vector_reduce_mul:
10832     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10833     break;
10834   case Intrinsic::vector_reduce_and:
10835     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10836     break;
10837   case Intrinsic::vector_reduce_or:
10838     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10839     break;
10840   case Intrinsic::vector_reduce_xor:
10841     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10842     break;
10843   case Intrinsic::vector_reduce_smax:
10844     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10845     break;
10846   case Intrinsic::vector_reduce_smin:
10847     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10848     break;
10849   case Intrinsic::vector_reduce_umax:
10850     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10851     break;
10852   case Intrinsic::vector_reduce_umin:
10853     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10854     break;
10855   case Intrinsic::vector_reduce_fmax:
10856     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10857     break;
10858   case Intrinsic::vector_reduce_fmin:
10859     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10860     break;
10861   case Intrinsic::vector_reduce_fmaximum:
10862     Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
10863     break;
10864   case Intrinsic::vector_reduce_fminimum:
10865     Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
10866     break;
10867   default:
10868     llvm_unreachable("Unhandled vector reduce intrinsic");
10869   }
10870   setValue(&I, Res);
10871 }
10872 
10873 /// Returns an AttributeList representing the attributes applied to the return
10874 /// value of the given call.
10875 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
10876   SmallVector<Attribute::AttrKind, 2> Attrs;
10877   if (CLI.RetSExt)
10878     Attrs.push_back(Attribute::SExt);
10879   if (CLI.RetZExt)
10880     Attrs.push_back(Attribute::ZExt);
10881   if (CLI.IsInReg)
10882     Attrs.push_back(Attribute::InReg);
10883 
10884   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
10885                             Attrs);
10886 }
10887 
10888 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
10889 /// implementation, which just calls LowerCall.
10890 /// FIXME: When all targets are migrated to using LowerCall, this hook
10891 /// should be integrated into SDISel.
10892 std::pair<SDValue, SDValue>
10893 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
10894   // Handle the incoming return values from the call.
10895   CLI.Ins.clear();
10896   Type *OrigRetTy = CLI.RetTy;
10897   SmallVector<EVT, 4> RetTys;
10898   SmallVector<TypeSize, 4> Offsets;
10899   auto &DL = CLI.DAG.getDataLayout();
10900   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
10901 
10902   if (CLI.IsPostTypeLegalization) {
10903     // If we are lowering a libcall after legalization, split the return type.
10904     SmallVector<EVT, 4> OldRetTys;
10905     SmallVector<TypeSize, 4> OldOffsets;
10906     RetTys.swap(OldRetTys);
10907     Offsets.swap(OldOffsets);
10908 
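          // For example, an i128 libcall result on a 64-bit target is split
          // here into two i64 registers at byte offsets 0 and 8.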
10909     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
10910       EVT RetVT = OldRetTys[i];
10911       uint64_t Offset = OldOffsets[i];
10912       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
10913       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
10914       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
10915       RetTys.append(NumRegs, RegisterVT);
10916       for (unsigned j = 0; j != NumRegs; ++j)
10917         Offsets.push_back(TypeSize::getFixed(Offset + j * RegisterVTByteSZ));
10918     }
10919   }
10920 
10921   SmallVector<ISD::OutputArg, 4> Outs;
10922   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
10923 
10924   bool CanLowerReturn =
10925       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
10926                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
10927 
10928   SDValue DemoteStackSlot;
10929   int DemoteStackIdx = -100;
10930   if (!CanLowerReturn) {
10931     // FIXME: equivalent assert?
10932     // assert(!CS.hasInAllocaArgument() &&
10933     //        "sret demotion is incompatible with inalloca");
10934     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
10935     Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
10936     MachineFunction &MF = CLI.DAG.getMachineFunction();
10937     DemoteStackIdx =
10938         MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
10939     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
10940                                               DL.getAllocaAddrSpace());
10941 
10942     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
10943     ArgListEntry Entry;
10944     Entry.Node = DemoteStackSlot;
10945     Entry.Ty = StackSlotPtrType;
10946     Entry.IsSExt = false;
10947     Entry.IsZExt = false;
10948     Entry.IsInReg = false;
10949     Entry.IsSRet = true;
10950     Entry.IsNest = false;
10951     Entry.IsByVal = false;
10952     Entry.IsByRef = false;
10953     Entry.IsReturned = false;
10954     Entry.IsSwiftSelf = false;
10955     Entry.IsSwiftAsync = false;
10956     Entry.IsSwiftError = false;
10957     Entry.IsCFGuardTarget = false;
10958     Entry.Alignment = Alignment;
10959     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
10960     CLI.NumFixedArgs += 1;
10961     CLI.getArgs()[0].IndirectType = CLI.RetTy;
10962     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
10963 
10964     // sret demotion isn't compatible with tail-calls, since the sret argument
10965     // points into the caller's stack frame.
10966     CLI.IsTailCall = false;
10967   } else {
10968     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10969         CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
10970     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
10971       ISD::ArgFlagsTy Flags;
10972       if (NeedsRegBlock) {
10973         Flags.setInConsecutiveRegs();
10974         if (I == RetTys.size() - 1)
10975           Flags.setInConsecutiveRegsLast();
10976       }
10977       EVT VT = RetTys[I];
10978       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10979                                                      CLI.CallConv, VT);
10980       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10981                                                        CLI.CallConv, VT);
10982       for (unsigned i = 0; i != NumRegs; ++i) {
10983         ISD::InputArg MyFlags;
10984         MyFlags.Flags = Flags;
10985         MyFlags.VT = RegisterVT;
10986         MyFlags.ArgVT = VT;
10987         MyFlags.Used = CLI.IsReturnValueUsed;
10988         if (CLI.RetTy->isPointerTy()) {
10989           MyFlags.Flags.setPointer();
10990           MyFlags.Flags.setPointerAddrSpace(
10991               cast<PointerType>(CLI.RetTy)->getAddressSpace());
10992         }
10993         if (CLI.RetSExt)
10994           MyFlags.Flags.setSExt();
10995         if (CLI.RetZExt)
10996           MyFlags.Flags.setZExt();
10997         if (CLI.IsInReg)
10998           MyFlags.Flags.setInReg();
10999         CLI.Ins.push_back(MyFlags);
11000       }
11001     }
11002   }
11003 
11004   // We push in swifterror return as the last element of CLI.Ins.
11005   ArgListTy &Args = CLI.getArgs();
11006   if (supportSwiftError()) {
11007     for (const ArgListEntry &Arg : Args) {
11008       if (Arg.IsSwiftError) {
11009         ISD::InputArg MyFlags;
11010         MyFlags.VT = getPointerTy(DL);
11011         MyFlags.ArgVT = EVT(getPointerTy(DL));
11012         MyFlags.Flags.setSwiftError();
11013         CLI.Ins.push_back(MyFlags);
11014       }
11015     }
11016   }
11017 
11018   // Handle all of the outgoing arguments.
11019   CLI.Outs.clear();
11020   CLI.OutVals.clear();
11021   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
11022     SmallVector<EVT, 4> ValueVTs;
11023     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
11024     // FIXME: Split arguments if CLI.IsPostTypeLegalization
11025     Type *FinalType = Args[i].Ty;
11026     if (Args[i].IsByVal)
11027       FinalType = Args[i].IndirectType;
11028     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
11029         FinalType, CLI.CallConv, CLI.IsVarArg, DL);
11030     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
11031          ++Value) {
11032       EVT VT = ValueVTs[Value];
11033       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
11034       SDValue Op = SDValue(Args[i].Node.getNode(),
11035                            Args[i].Node.getResNo() + Value);
11036       ISD::ArgFlagsTy Flags;
11037 
11038       // Certain targets (such as MIPS) may have a different ABI alignment
11039       // for a type depending on the context. Give the target a chance to
11040       // specify the alignment it wants.
11041       const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
11042       Flags.setOrigAlign(OriginalAlignment);
11043 
11044       if (Args[i].Ty->isPointerTy()) {
11045         Flags.setPointer();
11046         Flags.setPointerAddrSpace(
11047             cast<PointerType>(Args[i].Ty)->getAddressSpace());
11048       }
11049       if (Args[i].IsZExt)
11050         Flags.setZExt();
11051       if (Args[i].IsSExt)
11052         Flags.setSExt();
11053       if (Args[i].IsNoExt)
11054         Flags.setNoExt();
11055       if (Args[i].IsInReg) {
11056         // If we are using the vectorcall calling convention, a structure that
11057         // is passed InReg is surely an HVA (Homogeneous Vector Aggregate).
11058         if (CLI.CallConv == CallingConv::X86_VectorCall &&
11059             isa<StructType>(FinalType)) {
11060           // The first value of the structure is marked as the HVA start.
11061           if (0 == Value)
11062             Flags.setHvaStart();
11063           Flags.setHva();
11064         }
11065         // Set InReg Flag
11066         Flags.setInReg();
11067       }
11068       if (Args[i].IsSRet)
11069         Flags.setSRet();
11070       if (Args[i].IsSwiftSelf)
11071         Flags.setSwiftSelf();
11072       if (Args[i].IsSwiftAsync)
11073         Flags.setSwiftAsync();
11074       if (Args[i].IsSwiftError)
11075         Flags.setSwiftError();
11076       if (Args[i].IsCFGuardTarget)
11077         Flags.setCFGuardTarget();
11078       if (Args[i].IsByVal)
11079         Flags.setByVal();
11080       if (Args[i].IsByRef)
11081         Flags.setByRef();
11082       if (Args[i].IsPreallocated) {
11083         Flags.setPreallocated();
11084         // Set the byval flag for CCAssignFn callbacks that don't know about
11085         // preallocated.  This way we can know how many bytes we should've
11086         // allocated and how many bytes a callee cleanup function will pop.  If
11087         // we port preallocated to more targets, we'll have to add custom
11088         // preallocated handling in the various CC lowering callbacks.
11089         Flags.setByVal();
11090       }
11091       if (Args[i].IsInAlloca) {
11092         Flags.setInAlloca();
11093         // Set the byval flag for CCAssignFn callbacks that don't know about
11094         // inalloca.  This way we can know how many bytes we should've allocated
11095         // and how many bytes a callee cleanup function will pop.  If we port
11096         // inalloca to more targets, we'll have to add custom inalloca handling
11097         // in the various CC lowering callbacks.
11098         Flags.setByVal();
11099       }
11100       Align MemAlign;
11101       if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11102         unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
11103         Flags.setByValSize(FrameSize);
11104         // For ByVal, alignment should come from FE.  BE will guess if this
11105         // info is not there but there are cases it cannot get right.
11106         if (auto MA = Args[i].Alignment)
11107           MemAlign = *MA;
11108         else
11109           MemAlign = Align(getByValTypeAlignment(Args[i].IndirectType, DL));
11110       } else if (auto MA = Args[i].Alignment) {
11111         MemAlign = *MA;
11112       } else {
11113         MemAlign = OriginalAlignment;
11114       }
11115       Flags.setMemAlign(MemAlign);
11116       if (Args[i].IsNest)
11117         Flags.setNest();
11118       if (NeedsRegBlock)
11119         Flags.setInConsecutiveRegs();
11120 
11121       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
11122                                                  CLI.CallConv, VT);
11123       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
11124                                                         CLI.CallConv, VT);
11125       SmallVector<SDValue, 4> Parts(NumParts);
11126       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
11127 
11128       if (Args[i].IsSExt)
11129         ExtendKind = ISD::SIGN_EXTEND;
11130       else if (Args[i].IsZExt)
11131         ExtendKind = ISD::ZERO_EXTEND;
11132 
11133       // Conservatively only handle 'returned' on non-vectors that can be lowered,
11134       // for now.
11135       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
11136           CanLowerReturn) {
11137         assert((CLI.RetTy == Args[i].Ty ||
11138                 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
11139                  CLI.RetTy->getPointerAddressSpace() ==
11140                      Args[i].Ty->getPointerAddressSpace())) &&
11141                RetTys.size() == NumValues && "unexpected use of 'returned'");
11142         // Before passing 'returned' to the target lowering code, ensure that
11143         // either the register MVT and the actual EVT are the same size or that
11144         // the return value and argument are extended in the same way; in these
11145         // cases it's safe to pass the argument register value unchanged as the
11146         // return register value (although it's at the target's option whether
11147         // to do so)
11148         // TODO: allow code generation to take advantage of partially preserved
11149         // registers rather than clobbering the entire register when the
11150         // parameter extension method is not compatible with the return
11151         // extension method
11152         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
11153             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
11154              CLI.RetZExt == Args[i].IsZExt))
11155           Flags.setReturned();
11156       }
11157 
11158       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
11159                      CLI.CallConv, ExtendKind);
11160 
11161       for (unsigned j = 0; j != NumParts; ++j) {
11162         // If it isn't the first piece, the alignment must be 1.
11163         // For scalable vectors the scalable part is currently handled
11164         // by individual targets, so we just use the known minimum size here.
11165         ISD::OutputArg MyFlags(
11166             Flags, Parts[j].getValueType().getSimpleVT(), VT,
11167             i < CLI.NumFixedArgs, i,
11168             j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
11169         if (NumParts > 1 && j == 0)
11170           MyFlags.Flags.setSplit();
11171         else if (j != 0) {
11172           MyFlags.Flags.setOrigAlign(Align(1));
11173           if (j == NumParts - 1)
11174             MyFlags.Flags.setSplitEnd();
11175         }
11176 
11177         CLI.Outs.push_back(MyFlags);
11178         CLI.OutVals.push_back(Parts[j]);
11179       }
11180 
11181       if (NeedsRegBlock && Value == NumValues - 1)
11182         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11183     }
11184   }
11185 
11186   SmallVector<SDValue, 4> InVals;
11187   CLI.Chain = LowerCall(CLI, InVals);
11188 
11189   // Update CLI.InVals for use outside of this function.
11190   CLI.InVals = InVals;
11191 
11192   // Verify that the target's LowerCall behaved as expected.
11193   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
11194          "LowerCall didn't return a valid chain!");
11195   assert((!CLI.IsTailCall || InVals.empty()) &&
11196          "LowerCall emitted a return value for a tail call!");
11197   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
11198          "LowerCall didn't emit the correct number of values!");
11199 
11200   // For a tail call, the return value is merely live-out and there aren't
11201   // any nodes in the DAG representing it. Return a special value to
11202   // indicate that a tail call has been emitted and no more Instructions
11203   // should be processed in the current block.
11204   if (CLI.IsTailCall) {
11205     CLI.DAG.setRoot(CLI.Chain);
11206     return std::make_pair(SDValue(), SDValue());
11207   }
11208 
11209 #ifndef NDEBUG
11210   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
11211     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
11212     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
11213            "LowerCall emitted a value with the wrong type!");
11214   }
11215 #endif
11216 
11217   SmallVector<SDValue, 4> ReturnValues;
11218   if (!CanLowerReturn) {
11219     // The instruction result is the result of loading from the
11220     // hidden sret parameter.
11221     SmallVector<EVT, 1> PVTs;
11222     Type *PtrRetTy =
11223         PointerType::get(OrigRetTy->getContext(), DL.getAllocaAddrSpace());
11224 
11225     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
11226     assert(PVTs.size() == 1 && "Pointers should fit in one register");
11227     EVT PtrVT = PVTs[0];
11228 
11229     unsigned NumValues = RetTys.size();
11230     ReturnValues.resize(NumValues);
11231     SmallVector<SDValue, 4> Chains(NumValues);
11232 
11233     // An aggregate return value cannot wrap around the address space, so
11234     // offsets to its parts don't wrap either.
11235     MachineFunction &MF = CLI.DAG.getMachineFunction();
11236     Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
11237     for (unsigned i = 0; i < NumValues; ++i) {
11238       SDValue Add =
11239           CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
11240                           CLI.DAG.getConstant(Offsets[i], CLI.DL, PtrVT),
11241                           SDNodeFlags::NoUnsignedWrap);
11242       SDValue L = CLI.DAG.getLoad(
11243           RetTys[i], CLI.DL, CLI.Chain, Add,
11244           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
11245                                             DemoteStackIdx, Offsets[i]),
11246           HiddenSRetAlign);
11247       ReturnValues[i] = L;
11248       Chains[i] = L.getValue(1);
11249     }
11250 
11251     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
11252   } else {
11253     // Collect the legal value parts into potentially illegal values
11254     // that correspond to the original function's return values.
11255     std::optional<ISD::NodeType> AssertOp;
11256     if (CLI.RetSExt)
11257       AssertOp = ISD::AssertSext;
11258     else if (CLI.RetZExt)
11259       AssertOp = ISD::AssertZext;
11260     unsigned CurReg = 0;
11261     for (EVT VT : RetTys) {
11262       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
11263                                                      CLI.CallConv, VT);
11264       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
11265                                                        CLI.CallConv, VT);
11266 
11267       ReturnValues.push_back(getCopyFromParts(
11268           CLI.DAG, CLI.DL, &InVals[CurReg], NumRegs, RegisterVT, VT, nullptr,
11269           CLI.Chain, CLI.CallConv, AssertOp));
11270       CurReg += NumRegs;
11271     }
11272 
11273     // For a function returning void, there is no return value. We can't
11274     // create such a node, so we just return a null return value; nothing
11275     // will actually look at it.
11276     if (ReturnValues.empty())
11277       return std::make_pair(SDValue(), CLI.Chain);
11278   }
11279 
11280   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
11281                                 CLI.DAG.getVTList(RetTys), ReturnValues);
11282   return std::make_pair(Res, CLI.Chain);
11283 }
11284 
11285 /// Places new result values for the node in Results (their number
11286 /// and types must exactly match those of the original return values of
11287 /// the node), or leaves Results empty, which indicates that the node is not
11288 /// to be custom lowered after all.
11289 void TargetLowering::LowerOperationWrapper(SDNode *N,
11290                                            SmallVectorImpl<SDValue> &Results,
11291                                            SelectionDAG &DAG) const {
11292   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
11293 
11294   if (!Res.getNode())
11295     return;
11296 
11297   // If the original node has one result, take the return value from
11298   // LowerOperation as is. It might not be result number 0.
11299   if (N->getNumValues() == 1) {
11300     Results.push_back(Res);
11301     return;
11302   }
11303 
11304   // If the original node has multiple results, then the return node should
11305   // have the same number of results.
11306   assert((N->getNumValues() == Res->getNumValues()) &&
11307          "Lowering returned the wrong number of results!");
11308 
11309   // Place the new result values based on N's result numbers.
11310   for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
11311     Results.push_back(Res.getValue(I));
11312 }
11313 
11314 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
11315   llvm_unreachable("LowerOperation not implemented for this target!");
11316 }
11317 
11318 void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V,
11319                                                      unsigned Reg,
11320                                                      ISD::NodeType ExtendType) {
11321   SDValue Op = getNonRegisterValue(V);
11322   assert((Op.getOpcode() != ISD::CopyFromReg ||
11323           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
11324          "Copy from a reg to the same reg!");
11325   assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
11326 
11327   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11328   // If this is an InlineAsm we have to match the registers required, not the
11329   // notional registers required by the type.
11330 
11331   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
11332                    std::nullopt); // This is not an ABI copy.
11333   SDValue Chain = DAG.getEntryNode();
11334 
11335   if (ExtendType == ISD::ANY_EXTEND) {
11336     auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
11337     if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
11338       ExtendType = PreferredExtendIt->second;
11339   }
11340   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
11341   PendingExports.push_back(Chain);
11342 }
11343 
11344 #include "llvm/CodeGen/SelectionDAGISel.h"
11345 
11346 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
11347 /// entry block, return true.  This includes arguments used by switches, since
11348 /// the switch may expand into multiple basic blocks.
11349 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
11350   // With FastISel active, we may be splitting blocks, so force creation
11351   // of virtual registers for all non-dead arguments.
11352   if (FastISel)
11353     return A->use_empty();
11354 
11355   const BasicBlock &Entry = A->getParent()->front();
11356   for (const User *U : A->users())
11357     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
11358       return false;  // Use not in entry block.
11359 
11360   return true;
11361 }
11362 
11363 using ArgCopyElisionMapTy =
11364     DenseMap<const Argument *,
11365              std::pair<const AllocaInst *, const StoreInst *>>;
11366 
11367 /// Scan the entry block of the function in FuncInfo for arguments that look
11368 /// like copies into a local alloca. Record any copied arguments in
11369 /// ArgCopyElisionCandidates.
11370 static void
11371 findArgumentCopyElisionCandidates(const DataLayout &DL,
11372                                   FunctionLoweringInfo *FuncInfo,
11373                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
11374   // Record the state of every static alloca used in the entry block. Argument
11375   // allocas are all used in the entry block, so we need approximately as many
11376   // entries as we have arguments.
11377   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
11378   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
11379   unsigned NumArgs = FuncInfo->Fn->arg_size();
11380   StaticAllocas.reserve(NumArgs * 2);
11381 
11382   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
11383     if (!V)
11384       return nullptr;
11385     V = V->stripPointerCasts();
11386     const auto *AI = dyn_cast<AllocaInst>(V);
11387     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
11388       return nullptr;
11389     auto Iter = StaticAllocas.insert({AI, Unknown});
11390     return &Iter.first->second;
11391   };
11392 
11393   // Look for stores of arguments to static allocas. Look through bitcasts and
11394   // GEPs to handle type coercions, as long as the alloca is fully initialized
11395   // by the store. Any non-store use of an alloca escapes it and any subsequent
11396   // unanalyzed store might write it.
11397   // FIXME: Handle structs initialized with multiple stores.
11398   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
11399     // Look for stores, and handle non-store uses conservatively.
11400     const auto *SI = dyn_cast<StoreInst>(&I);
11401     if (!SI) {
11402       // We will look through cast uses, so ignore them completely.
11403       if (I.isCast())
11404         continue;
11405       // Ignore debug info and pseudo op intrinsics; they don't escape or store
11406       // to allocas.
11407       if (I.isDebugOrPseudoInst())
11408         continue;
11409       // This is an unknown instruction. Assume it escapes or writes to all
11410       // static alloca operands.
11411       for (const Use &U : I.operands()) {
11412         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
11413           *Info = StaticAllocaInfo::Clobbered;
11414       }
11415       continue;
11416     }
11417 
11418     // If the stored value is a static alloca, mark it as escaped.
11419     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
11420       *Info = StaticAllocaInfo::Clobbered;
11421 
11422     // Check if the destination is a static alloca.
11423     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
11424     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
11425     if (!Info)
11426       continue;
11427     const AllocaInst *AI = cast<AllocaInst>(Dst);
11428 
11429     // Skip allocas that have been initialized or clobbered.
11430     if (*Info != StaticAllocaInfo::Unknown)
11431       continue;
11432 
11433     // Check if the stored value is an argument, and that this store fully
11434     // initializes the alloca.
11435     // If the argument type has padding bits we can't directly forward a pointer
11436     // as the upper bits may contain garbage.
11437     // Don't elide copies from the same argument twice.
11438     const Value *Val = SI->getValueOperand()->stripPointerCasts();
11439     const auto *Arg = dyn_cast<Argument>(Val);
11440     if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11441         Arg->getType()->isEmptyTy() ||
11442         DL.getTypeStoreSize(Arg->getType()) !=
11443             DL.getTypeAllocSize(AI->getAllocatedType()) ||
11444         !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
11445         ArgCopyElisionCandidates.count(Arg)) {
11446       *Info = StaticAllocaInfo::Clobbered;
11447       continue;
11448     }
11449 
11450     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
11451                       << '\n');
11452 
11453     // Mark this alloca and store for argument copy elision.
11454     *Info = StaticAllocaInfo::Elidable;
11455     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
11456 
11457     // Stop scanning if we've seen all arguments. This will happen early in -O0
11458     // builds, which is useful, because -O0 builds have large entry blocks and
11459     // many allocas.
11460     if (ArgCopyElisionCandidates.size() == NumArgs)
11461       break;
11462   }
11463 }
11464 
11465 /// Try to elide argument copies from memory into a local alloca. Succeeds if
11466 /// ArgVal is a load from a suitable fixed stack object.
11467 static void tryToElideArgumentCopy(
11468     FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
11469     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
11470     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
11471     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
11472     ArrayRef<SDValue> ArgVals, bool &ArgHasUses) {
11473   // Check if this is a load from a fixed stack object.
11474   auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
11475   if (!LNode)
11476     return;
11477   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
11478   if (!FINode)
11479     return;
11480 
11481   // Check that the fixed stack object is the right size and alignment.
11482   // Look at the alignment that the user wrote on the alloca instead of looking
11483   // at the stack object.
11484   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11485   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11486   const AllocaInst *AI = ArgCopyIter->second.first;
11487   int FixedIndex = FINode->getIndex();
11488   int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
11489   int OldIndex = AllocaIndex;
11490   MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
11491   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
11492     LLVM_DEBUG(
11493         dbgs() << "  argument copy elision failed due to bad fixed stack "
11494                   "object size\n");
11495     return;
11496   }
11497   Align RequiredAlignment = AI->getAlign();
11498   if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
11499     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
11500                          "greater than stack argument alignment ("
11501                       << DebugStr(RequiredAlignment) << " vs "
11502                       << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
11503     return;
11504   }
11505 
11506   // Perform the elision. Delete the old stack object and replace its only use
11507   // in the variable info map. Mark the stack object as mutable and aliased.
11508   LLVM_DEBUG({
11509     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
11510            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
11511            << '\n';
11512   });
11513   MFI.RemoveStackObject(OldIndex);
11514   MFI.setIsImmutableObjectIndex(FixedIndex, false);
11515   MFI.setIsAliasedObjectIndex(FixedIndex, true);
11516   AllocaIndex = FixedIndex;
11517   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
11518   for (SDValue ArgVal : ArgVals)
11519     Chains.push_back(ArgVal.getValue(1));
11520 
11521   // Avoid emitting code for the store implementing the copy.
11522   const StoreInst *SI = ArgCopyIter->second.second;
11523   ElidedArgCopyInstrs.insert(SI);
11524 
11525   // Check for uses of the argument again so that we can avoid exporting ArgVal
11526   // if it isn't used by anything other than the store.
11527   for (const Value *U : Arg.users()) {
11528     if (U != SI) {
11529       ArgHasUses = true;
11530       break;
11531     }
11532   }
11533 }
11534 
11535 void SelectionDAGISel::LowerArguments(const Function &F) {
11536   SelectionDAG &DAG = SDB->DAG;
11537   SDLoc dl = SDB->getCurSDLoc();
11538   const DataLayout &DL = DAG.getDataLayout();
11539   SmallVector<ISD::InputArg, 16> Ins;
11540 
11541   // In Naked functions we aren't going to save any registers.
11542   if (F.hasFnAttribute(Attribute::Naked))
11543     return;
11544 
11545   if (!FuncInfo->CanLowerReturn) {
11546     // Put in an sret pointer parameter before all the other parameters.
11547     SmallVector<EVT, 1> ValueVTs;
11548     ComputeValueVTs(*TLI, DAG.getDataLayout(),
11549                     PointerType::get(F.getContext(),
11550                                      DAG.getDataLayout().getAllocaAddrSpace()),
11551                     ValueVTs);
11552 
11553     // NOTE: Assuming that a pointer will never break down to more than one VT
11554     // or one register.
11555     ISD::ArgFlagsTy Flags;
11556     Flags.setSRet();
11557     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
11558     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
11559                          ISD::InputArg::NoArgIndex, 0);
11560     Ins.push_back(RetArg);
11561   }
11562 
11563   // Look for stores of arguments to static allocas. Mark such arguments with a
11564   // flag to ask the target to give us the memory location of that argument if
11565   // available.
11566   ArgCopyElisionMapTy ArgCopyElisionCandidates;
11567   findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
11568                                     ArgCopyElisionCandidates);
11569 
11570   // Set up the incoming argument description vector.
11571   for (const Argument &Arg : F.args()) {
11572     unsigned ArgNo = Arg.getArgNo();
11573     SmallVector<EVT, 4> ValueVTs;
11574     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11575     bool isArgValueUsed = !Arg.use_empty();
11576     unsigned PartBase = 0;
11577     Type *FinalType = Arg.getType();
11578     if (Arg.hasAttribute(Attribute::ByVal))
11579       FinalType = Arg.getParamByValType();
11580     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
11581         FinalType, F.getCallingConv(), F.isVarArg(), DL);
11582     for (unsigned Value = 0, NumValues = ValueVTs.size();
11583          Value != NumValues; ++Value) {
11584       EVT VT = ValueVTs[Value];
11585       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
11586       ISD::ArgFlagsTy Flags;
11587 
11589       if (Arg.getType()->isPointerTy()) {
11590         Flags.setPointer();
11591         Flags.setPointerAddrSpace(
11592             cast<PointerType>(Arg.getType())->getAddressSpace());
11593       }
11594       if (Arg.hasAttribute(Attribute::ZExt))
11595         Flags.setZExt();
11596       if (Arg.hasAttribute(Attribute::SExt))
11597         Flags.setSExt();
11598       if (Arg.hasAttribute(Attribute::InReg)) {
11599         // If we are using the vectorcall calling convention, a structure
11600         // that is passed InReg is surely an HVA (homogeneous vector aggregate).
11601         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
11602             isa<StructType>(Arg.getType())) {
11603           // The first value of a structure is marked as the start of the HVA.
11604           if (0 == Value)
11605             Flags.setHvaStart();
11606           Flags.setHva();
11607         }
11608         // Set InReg Flag
11609         Flags.setInReg();
11610       }
11611       if (Arg.hasAttribute(Attribute::StructRet))
11612         Flags.setSRet();
11613       if (Arg.hasAttribute(Attribute::SwiftSelf))
11614         Flags.setSwiftSelf();
11615       if (Arg.hasAttribute(Attribute::SwiftAsync))
11616         Flags.setSwiftAsync();
11617       if (Arg.hasAttribute(Attribute::SwiftError))
11618         Flags.setSwiftError();
11619       if (Arg.hasAttribute(Attribute::ByVal))
11620         Flags.setByVal();
11621       if (Arg.hasAttribute(Attribute::ByRef))
11622         Flags.setByRef();
11623       if (Arg.hasAttribute(Attribute::InAlloca)) {
11624         Flags.setInAlloca();
11625         // Set the byval flag for CCAssignFn callbacks that don't know about
11626         // inalloca.  This way we can know how many bytes we should've allocated
11627         // and how many bytes a callee cleanup function will pop.  If we port
11628         // inalloca to more targets, we'll have to add custom inalloca handling
11629         // in the various CC lowering callbacks.
11630         Flags.setByVal();
11631       }
11632       if (Arg.hasAttribute(Attribute::Preallocated)) {
11633         Flags.setPreallocated();
11634         // Set the byval flag for CCAssignFn callbacks that don't know about
11635         // preallocated.  This way we can know how many bytes we should've
11636         // allocated and how many bytes a callee cleanup function will pop.  If
11637         // we port preallocated to more targets, we'll have to add custom
11638         // preallocated handling in the various CC lowering callbacks.
11639         Flags.setByVal();
11640       }
11641 
11642       // Certain targets (such as MIPS), may have a different ABI alignment
11643       // for a type depending on the context. Give the target a chance to
11644       // specify the alignment it wants.
11645       const Align OriginalAlignment(
11646           TLI->getABIAlignmentForCallingConv(ArgTy, DL));
11647       Flags.setOrigAlign(OriginalAlignment);
11648 
11649       Align MemAlign;
11650       Type *ArgMemTy = nullptr;
11651       if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
11652           Flags.isByRef()) {
11653         if (!ArgMemTy)
11654           ArgMemTy = Arg.getPointeeInMemoryValueType();
11655 
11656         uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
11657 
11658         // For in-memory arguments, the size and alignment should be passed
11659         // from the frontend. The backend will guess if this info is missing,
11660         // but there are cases it cannot get right.
11661         if (auto ParamAlign = Arg.getParamStackAlign())
11662           MemAlign = *ParamAlign;
11663         else if ((ParamAlign = Arg.getParamAlign()))
11664           MemAlign = *ParamAlign;
11665         else
11666           MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
11667         if (Flags.isByRef())
11668           Flags.setByRefSize(MemSize);
11669         else
11670           Flags.setByValSize(MemSize);
11671       } else if (auto ParamAlign = Arg.getParamStackAlign()) {
11672         MemAlign = *ParamAlign;
11673       } else {
11674         MemAlign = OriginalAlignment;
11675       }
11676       Flags.setMemAlign(MemAlign);
11677 
11678       if (Arg.hasAttribute(Attribute::Nest))
11679         Flags.setNest();
11680       if (NeedsRegBlock)
11681         Flags.setInConsecutiveRegs();
11682       if (ArgCopyElisionCandidates.count(&Arg))
11683         Flags.setCopyElisionCandidate();
11684       if (Arg.hasAttribute(Attribute::Returned))
11685         Flags.setReturned();
11686 
11687       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
11688           *CurDAG->getContext(), F.getCallingConv(), VT);
11689       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
11690           *CurDAG->getContext(), F.getCallingConv(), VT);
11691       for (unsigned i = 0; i != NumRegs; ++i) {
11692         // For scalable vectors, use the minimum size; individual targets
11693         // are responsible for handling scalable vector arguments and
11694         // return values.
11695         ISD::InputArg MyFlags(
11696             Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11697             PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
11698         if (NumRegs > 1 && i == 0)
11699           MyFlags.Flags.setSplit();
11700         // If it isn't the first piece, the alignment must be 1.
11701         else if (i > 0) {
11702           MyFlags.Flags.setOrigAlign(Align(1));
11703           if (i == NumRegs - 1)
11704             MyFlags.Flags.setSplitEnd();
11705         }
11706         Ins.push_back(MyFlags);
11707       }
11708       if (NeedsRegBlock && Value == NumValues - 1)
11709         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11710       PartBase += VT.getStoreSize().getKnownMinValue();
11711     }
11712   }
11713 
11714   // Call the target to set up the argument values.
11715   SmallVector<SDValue, 8> InVals;
11716   SDValue NewRoot = TLI->LowerFormalArguments(
11717       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
11718 
11719   // Verify that the target's LowerFormalArguments behaved as expected.
11720   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
11721          "LowerFormalArguments didn't return a valid chain!");
11722   assert(InVals.size() == Ins.size() &&
11723          "LowerFormalArguments didn't emit the correct number of values!");
11724   LLVM_DEBUG({
11725     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
11726       assert(InVals[i].getNode() &&
11727              "LowerFormalArguments emitted a null value!");
11728       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11729              "LowerFormalArguments emitted a value with the wrong type!");
11730     }
11731   });
11732 
11733   // Update the DAG with the new chain value resulting from argument lowering.
11734   DAG.setRoot(NewRoot);
11735 
11736   // Set up the argument values.
11737   unsigned i = 0;
11738   if (!FuncInfo->CanLowerReturn) {
11739     // Create a virtual register for the sret pointer, and put in a copy
11740     // from the sret argument into it.
11741     SmallVector<EVT, 1> ValueVTs;
11742     ComputeValueVTs(*TLI, DAG.getDataLayout(),
11743                     PointerType::get(F.getContext(),
11744                                      DAG.getDataLayout().getAllocaAddrSpace()),
11745                     ValueVTs);
11746     MVT VT = ValueVTs[0].getSimpleVT();
11747     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
11748     std::optional<ISD::NodeType> AssertOp;
11749     SDValue ArgValue =
11750         getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT, nullptr, NewRoot,
11751                          F.getCallingConv(), AssertOp);
11752 
11753     MachineFunction& MF = SDB->DAG.getMachineFunction();
11754     MachineRegisterInfo& RegInfo = MF.getRegInfo();
11755     Register SRetReg =
11756         RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
11757     FuncInfo->DemoteRegister = SRetReg;
11758     NewRoot =
11759         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
11760     DAG.setRoot(NewRoot);
11761 
11762     // i indexes lowered arguments.  Bump it past the hidden sret argument.
11763     ++i;
11764   }
11765 
11766   SmallVector<SDValue, 4> Chains;
11767   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11768   for (const Argument &Arg : F.args()) {
11769     SmallVector<SDValue, 4> ArgValues;
11770     SmallVector<EVT, 4> ValueVTs;
11771     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11772     unsigned NumValues = ValueVTs.size();
11773     if (NumValues == 0)
11774       continue;
11775 
11776     bool ArgHasUses = !Arg.use_empty();
11777 
11778     // Elide the copying store if the target loaded this argument from a
11779     // suitable fixed stack object.
11780     if (Ins[i].Flags.isCopyElisionCandidate()) {
11781       unsigned NumParts = 0;
11782       for (EVT VT : ValueVTs)
11783         NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
11784                                                        F.getCallingConv(), VT);
11785 
11786       tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
11787                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
11788                              ArrayRef(&InVals[i], NumParts), ArgHasUses);
11789     }
11790 
11791     // If this argument is unused then remember its value. It is used to generate
11792     // debugging information.
11793     bool isSwiftErrorArg =
11794         TLI->supportSwiftError() &&
11795         Arg.hasAttribute(Attribute::SwiftError);
11796     if (!ArgHasUses && !isSwiftErrorArg) {
11797       SDB->setUnusedArgValue(&Arg, InVals[i]);
11798 
11799       // Also remember any frame index for use in FastISel.
11800       if (FrameIndexSDNode *FI =
11801           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11802         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11803     }
11804 
11805     for (unsigned Val = 0; Val != NumValues; ++Val) {
11806       EVT VT = ValueVTs[Val];
11807       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
11808                                                       F.getCallingConv(), VT);
11809       unsigned NumParts = TLI->getNumRegistersForCallingConv(
11810           *CurDAG->getContext(), F.getCallingConv(), VT);
11811 
11812       // Even an apparent 'unused' swifterror argument needs to be returned. So
11813       // we do generate a copy for it that can be used on return from the
11814       // function.
11815       if (ArgHasUses || isSwiftErrorArg) {
11816         std::optional<ISD::NodeType> AssertOp;
11817         if (Arg.hasAttribute(Attribute::SExt))
11818           AssertOp = ISD::AssertSext;
11819         else if (Arg.hasAttribute(Attribute::ZExt))
11820           AssertOp = ISD::AssertZext;
11821 
11822         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
11823                                              PartVT, VT, nullptr, NewRoot,
11824                                              F.getCallingConv(), AssertOp));
11825       }
11826 
11827       i += NumParts;
11828     }
11829 
11830     // We don't need to do anything else for unused arguments.
11831     if (ArgValues.empty())
11832       continue;
11833 
11834     // Note down frame index.
11835     if (FrameIndexSDNode *FI =
11836         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11837       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11838 
11839     SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
11840                                      SDB->getCurSDLoc());
11841 
11842     SDB->setValue(&Arg, Res);
11843     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
11844       // We want to associate the argument with the frame index, among
11845       // involved operands, that correspond to the lowest address. The
11846       // getCopyFromParts function, called earlier, is swapping the order of
11847       // the operands to BUILD_PAIR depending on endianness. The result of
11848       // that swapping is that the least significant bits of the argument will
11849       // be in the first operand of the BUILD_PAIR node, and the most
11850       // significant bits will be in the second operand.
11851       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
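      // For example (illustrative, assuming an i64 argument lowered as two
      // i32 parts): on a big-endian target the most significant half lives at
      // the lower address, so the low-address operand is operand 1; on a
      // little-endian target it is operand 0.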
11852       if (LoadSDNode *LNode =
11853           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
11854         if (FrameIndexSDNode *FI =
11855             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11856           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11857     }
11858 
11859     // Analyses past this point are naive and don't expect an assertion.
11860     if (Res.getOpcode() == ISD::AssertZext)
11861       Res = Res.getOperand(0);
11862 
11863     // Update the SwiftErrorVRegDefMap.
11864     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
11865       Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11866       if (Reg.isVirtual())
11867         SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
11868                                    Reg);
11869     }
11870 
11871     // If this argument is live outside of the entry block, insert a copy from
11872     // wherever we got it to the vreg that other BB's will reference it as.
11873     if (Res.getOpcode() == ISD::CopyFromReg) {
11874       // If we can, though, try to skip creating an unnecessary vreg.
11875       // FIXME: This isn't very clean... it would be nice to make this more
11876       // general.
11877       Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11878       if (Reg.isVirtual()) {
11879         FuncInfo->ValueMap[&Arg] = Reg;
11880         continue;
11881       }
11882     }
11883     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
11884       FuncInfo->InitializeRegForValue(&Arg);
11885       SDB->CopyToExportRegsIfNeeded(&Arg);
11886     }
11887   }
11888 
11889   if (!Chains.empty()) {
11890     Chains.push_back(NewRoot);
11891     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
11892   }
11893 
11894   DAG.setRoot(NewRoot);
11895 
11896   assert(i == InVals.size() && "Argument register count mismatch!");
11897 
11898   // If any argument copy elisions occurred and we have debug info, update the
11899   // stale frame indices used in the dbg.declare variable info table.
11900   if (!ArgCopyElisionFrameIndexMap.empty()) {
11901     for (MachineFunction::VariableDbgInfo &VI :
11902          MF->getInStackSlotVariableDbgInfo()) {
11903       auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
11904       if (I != ArgCopyElisionFrameIndexMap.end())
11905         VI.updateStackSlot(I->second);
11906     }
11907   }
11908 
11909   // Finally, if the target has anything special to do, allow it to do so.
11910   emitFunctionEntryCode();
11911 }
11912 
11913 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
11914 /// ensure constants are generated when needed.  Remember the virtual registers
11915 /// that need to be added to the Machine PHI nodes as input.  We cannot just
11916 /// directly add them, because expansion might result in multiple MBB's for one
11917 /// BB.  As such, the start of the BB might correspond to a different MBB than
11918 /// the end.
11919 void
11920 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
11921   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11922 
11923   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
11924 
11925   // Check PHI nodes in successors that expect a value to be available from this
11926   // block.
11927   for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
11928     if (!isa<PHINode>(SuccBB->begin())) continue;
11929     MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);
11930 
11931     // If this terminator has multiple identical successors (common for
11932     // switches), only handle each succ once.
11933     if (!SuccsHandled.insert(SuccMBB).second)
11934       continue;
11935 
11936     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
11937 
11938     // At this point we know that there is a 1-1 correspondence between LLVM PHI
11939     // nodes and Machine PHI nodes, but the incoming operands have not been
11940     // emitted yet.
11941     for (const PHINode &PN : SuccBB->phis()) {
11942       // Ignore dead phi's.
11943       if (PN.use_empty())
11944         continue;
11945 
11946       // Skip empty types
11947       if (PN.getType()->isEmptyTy())
11948         continue;
11949 
11950       unsigned Reg;
11951       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11952 
11953       if (const auto *C = dyn_cast<Constant>(PHIOp)) {
11954         unsigned &RegOut = ConstantsOut[C];
11955         if (RegOut == 0) {
11956           RegOut = FuncInfo.CreateRegs(C);
11957           // We need to zero/sign extend ConstantInt phi operands to match
11958           // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
11959           ISD::NodeType ExtendType = ISD::ANY_EXTEND;
11960           if (auto *CI = dyn_cast<ConstantInt>(C))
11961             ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
11962                                                     : ISD::ZERO_EXTEND;
11963           CopyValueToVirtualRegister(C, RegOut, ExtendType);
11964         }
11965         Reg = RegOut;
11966       } else {
11967         DenseMap<const Value *, Register>::iterator I =
11968           FuncInfo.ValueMap.find(PHIOp);
11969         if (I != FuncInfo.ValueMap.end())
11970           Reg = I->second;
11971         else {
11972           assert(isa<AllocaInst>(PHIOp) &&
11973                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
11974                  "Didn't codegen value into a register!??");
11975           Reg = FuncInfo.CreateRegs(PHIOp);
11976           CopyValueToVirtualRegister(PHIOp, Reg);
11977         }
11978       }
11979 
11980       // Remember that this register needs to be added to the machine PHI node as
11981       // the input for this MBB.
11982       SmallVector<EVT, 4> ValueVTs;
11983       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
11984       for (EVT VT : ValueVTs) {
11985         const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
11986         for (unsigned i = 0; i != NumRegisters; ++i)
11987           FuncInfo.PHINodesToUpdate.push_back(
11988               std::make_pair(&*MBBI++, Reg + i));
11989         Reg += NumRegisters;
11990       }
11991     }
11992   }
11993 
11994   ConstantsOut.clear();
11995 }
11996 
11997 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
11998   MachineFunction::iterator I(MBB);
11999   if (++I == FuncInfo.MF->end())
12000     return nullptr;
12001   return &*I;
12002 }
12003 
12004 /// During lowering new call nodes can be created (such as memset, etc.).
12005 /// Those will become new roots of the current DAG, but complications arise
12006 /// when they are tail calls. In such cases, the call lowering will update
12007 /// the root, but the builder still needs to know that a tail call has been
12008 /// lowered in order to avoid generating an additional return.
12009 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
12010   // If the node is null, we do have a tail call.
12011   if (MaybeTC.getNode() != nullptr)
12012     DAG.setRoot(MaybeTC);
12013   else
12014     HasTailCall = true;
12015 }
12016 
12017 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
12018                                         MachineBasicBlock *SwitchMBB,
12019                                         MachineBasicBlock *DefaultMBB) {
12020   MachineFunction *CurMF = FuncInfo.MF;
12021   MachineBasicBlock *NextMBB = nullptr;
12022   MachineFunction::iterator BBI(W.MBB);
12023   if (++BBI != FuncInfo.MF->end())
12024     NextMBB = &*BBI;
12025 
12026   unsigned Size = W.LastCluster - W.FirstCluster + 1;
12027 
12028   BranchProbabilityInfo *BPI = FuncInfo.BPI;
12029 
12030   if (Size == 2 && W.MBB == SwitchMBB) {
12031     // If any two of the cases have the same destination, and if one value
12032     // is the same as the other, but has one bit unset that the other has set,
12033     // use bit manipulation to do two compares at once.  For example:
12034     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
12035     // TODO: This could be extended to merge any 2 cases in switches with 3
12036     // cases.
12037     // TODO: Handle cases where W.CaseBB != SwitchBB.
12038     CaseCluster &Small = *W.FirstCluster;
12039     CaseCluster &Big = *W.LastCluster;
12040 
12041     if (Small.Low == Small.High && Big.Low == Big.High &&
12042         Small.MBB == Big.MBB) {
12043       const APInt &SmallValue = Small.Low->getValue();
12044       const APInt &BigValue = Big.Low->getValue();
12045 
12046       // Check that there is only one bit different.
12047       APInt CommonBit = BigValue ^ SmallValue;
12048       if (CommonBit.isPowerOf2()) {
12049         SDValue CondLHS = getValue(Cond);
12050         EVT VT = CondLHS.getValueType();
12051         SDLoc DL = getCurSDLoc();
12052 
12053         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
12054                                  DAG.getConstant(CommonBit, DL, VT));
12055         SDValue Cond = DAG.getSetCC(
12056             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
12057             ISD::SETEQ);
12058 
12059         // Update successor info.
12060         // Both Small and Big will jump to Small.BB, so we sum up the
12061         // probabilities.
12062         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
12063         if (BPI)
12064           addSuccessorWithProb(
12065               SwitchMBB, DefaultMBB,
12066               // The default destination is the first successor in IR.
12067               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
12068         else
12069           addSuccessorWithProb(SwitchMBB, DefaultMBB);
12070 
12071         // Insert the true branch.
12072         SDValue BrCond =
12073             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
12074                         DAG.getBasicBlock(Small.MBB));
12075         // Insert the false branch.
12076         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
12077                              DAG.getBasicBlock(DefaultMBB));
12078 
12079         DAG.setRoot(BrCond);
12080         return;
12081       }
12082     }
12083   }
12084 
12085   if (TM.getOptLevel() != CodeGenOptLevel::None) {
12086     // Here, we order cases by probability so the most likely case will be
12087     // checked first. However, two clusters can have the same probability in
12088     // which case their relative ordering is non-deterministic. So we use Low
12089     // as a tie-breaker, since clusters are guaranteed never to overlap.
12090     llvm::sort(W.FirstCluster, W.LastCluster + 1,
12091                [](const CaseCluster &a, const CaseCluster &b) {
12092                  return a.Prob != b.Prob
12093                             ? a.Prob > b.Prob
12094                             : a.Low->getValue().slt(b.Low->getValue());
12095                });
12096 
12097     // Rearrange the case blocks so that the last one falls through if possible
12098     // without changing the order of probabilities.
12099     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
12100       --I;
12101       if (I->Prob > W.LastCluster->Prob)
12102         break;
12103       if (I->Kind == CC_Range && I->MBB == NextMBB) {
12104         std::swap(*I, *W.LastCluster);
12105         break;
12106       }
12107     }
12108   }
12109 
12110   // Compute total probability.
12111   BranchProbability DefaultProb = W.DefaultProb;
12112   BranchProbability UnhandledProbs = DefaultProb;
12113   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
12114     UnhandledProbs += I->Prob;
12115 
12116   MachineBasicBlock *CurMBB = W.MBB;
12117   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
12118     bool FallthroughUnreachable = false;
12119     MachineBasicBlock *Fallthrough;
12120     if (I == W.LastCluster) {
12121       // For the last cluster, fall through to the default destination.
12122       Fallthrough = DefaultMBB;
12123       FallthroughUnreachable = isa<UnreachableInst>(
12124           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
12125     } else {
12126       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
12127       CurMF->insert(BBI, Fallthrough);
12128       // Put Cond in a virtual register to make it available from the new blocks.
12129       ExportFromCurrentBlock(Cond);
12130     }
12131     UnhandledProbs -= I->Prob;
12132 
12133     switch (I->Kind) {
12134       case CC_JumpTable: {
12135         // FIXME: Optimize away range check based on pivot comparisons.
12136         JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
12137         SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
12138 
12139         // The jump block hasn't been inserted yet; insert it here.
12140         MachineBasicBlock *JumpMBB = JT->MBB;
12141         CurMF->insert(BBI, JumpMBB);
12142 
12143         auto JumpProb = I->Prob;
12144         auto FallthroughProb = UnhandledProbs;
12145 
12146         // If the default statement is a target of the jump table, we evenly
12147         // distribute the default probability to successors of CurMBB. Also
12148         // update the probability on the edge from JumpMBB to Fallthrough.
12149         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
12150                                               SE = JumpMBB->succ_end();
12151              SI != SE; ++SI) {
12152           if (*SI == DefaultMBB) {
12153             JumpProb += DefaultProb / 2;
12154             FallthroughProb -= DefaultProb / 2;
12155             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
12156             JumpMBB->normalizeSuccProbs();
12157             break;
12158           }
12159         }
12160 
12161         // If the default clause is unreachable, propagate that knowledge into
12162         // JTH->FallthroughUnreachable which will use it to suppress the range
12163         // check.
12164         //
12165         // However, don't do this if we're doing branch target enforcement,
12166         // because a table branch _without_ a range check can be a tempting JOP
12167         // gadget - out-of-bounds inputs that are impossible in correct
12168         // execution become possible again if an attacker can influence the
12169         // control flow. So if an attacker doesn't already have a BTI bypass
12170         // available, we don't want them to be able to get one out of this
12171         // table branch.
12172         if (FallthroughUnreachable) {
12173           Function &CurFunc = CurMF->getFunction();
12174           if (!CurFunc.hasFnAttribute("branch-target-enforcement"))
12175             JTH->FallthroughUnreachable = true;
12176         }
12177 
12178         if (!JTH->FallthroughUnreachable)
12179           addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12180         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12181         CurMBB->normalizeSuccProbs();
12182 
12183         // The jump table header will be inserted in our current block, do the
12184         // range check, and fall through to our fallthrough block.
12185         JTH->HeaderBB = CurMBB;
12186         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
12187 
12188         // If we're in the right place, emit the jump table header right now.
12189         if (CurMBB == SwitchMBB) {
12190           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
12191           JTH->Emitted = true;
12192         }
12193         break;
12194       }
12195       case CC_BitTests: {
12196         // FIXME: Optimize away range check based on pivot comparisons.
12197         BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
12198 
12199         // The bit test blocks haven't been inserted yet; insert them here.
12200         for (BitTestCase &BTC : BTB->Cases)
12201           CurMF->insert(BBI, BTC.ThisBB);
12202 
12203         // Fill in fields of the BitTestBlock.
12204         BTB->Parent = CurMBB;
12205         BTB->Default = Fallthrough;
12206 
12207         BTB->DefaultProb = UnhandledProbs;
12208         // If the cases in bit test don't form a contiguous range, we evenly
12209         // distribute the probability on the edge to Fallthrough to two
12210         // successors of CurMBB.
12211         if (!BTB->ContiguousRange) {
12212           BTB->Prob += DefaultProb / 2;
12213           BTB->DefaultProb -= DefaultProb / 2;
12214         }
12215 
12216         if (FallthroughUnreachable)
12217           BTB->FallthroughUnreachable = true;
12218 
12219         // If we're in the right place, emit the bit test header right now.
12220         if (CurMBB == SwitchMBB) {
12221           visitBitTestHeader(*BTB, SwitchMBB);
12222           BTB->Emitted = true;
12223         }
12224         break;
12225       }
12226       case CC_Range: {
12227         const Value *RHS, *LHS, *MHS;
12228         ISD::CondCode CC;
12229         if (I->Low == I->High) {
12230           // Check Cond == I->Low.
12231           CC = ISD::SETEQ;
12232           LHS = Cond;
12233           RHS = I->Low;
12234           MHS = nullptr;
12235         } else {
12236           // Check I->Low <= Cond <= I->High.
12237           CC = ISD::SETLE;
12238           LHS = I->Low;
12239           MHS = Cond;
12240           RHS = I->High;
12241         }
12242 
12243         // If Fallthrough is unreachable, fold away the comparison.
12244         if (FallthroughUnreachable)
12245           CC = ISD::SETTRUE;
12246 
12247         // The false probability is the sum of all unhandled cases.
12248         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
12249                      getCurSDLoc(), I->Prob, UnhandledProbs);
12250 
12251         if (CurMBB == SwitchMBB)
12252           visitSwitchCase(CB, SwitchMBB);
12253         else
12254           SL->SwitchCases.push_back(CB);
12255 
12256         break;
12257       }
12258     }
12259     CurMBB = Fallthrough;
12260   }
12261 }
12262 
12263 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
12264                                         const SwitchWorkListItem &W,
12265                                         Value *Cond,
12266                                         MachineBasicBlock *SwitchMBB) {
12267   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
12268          "Clusters not sorted?");
12269   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
12270 
12271   auto [LastLeft, FirstRight, LeftProb, RightProb] =
12272       SL->computeSplitWorkItemInfo(W);
12273 
12274   // Use the first element on the right as pivot since we will make less-than
12275   // comparisons against it.
12276   CaseClusterIt PivotCluster = FirstRight;
12277   assert(PivotCluster > W.FirstCluster);
12278   assert(PivotCluster <= W.LastCluster);
12279 
12280   CaseClusterIt FirstLeft = W.FirstCluster;
12281   CaseClusterIt LastRight = W.LastCluster;
12282 
12283   const ConstantInt *Pivot = PivotCluster->Low;
12284 
12285   // New blocks will be inserted immediately after the current one.
12286   MachineFunction::iterator BBI(W.MBB);
12287   ++BBI;
12288 
12289   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
12290   // we can branch to its destination directly if it's squeezed exactly in
12291   // between the known lower bound and Pivot - 1.
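      // For example (illustrative), with W.GE == 0, a single left cluster
      // covering [0, 3], and Pivot == 4, the range check is redundant and we
      // can branch directly to the cluster's block.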
12292   MachineBasicBlock *LeftMBB;
12293   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
12294       FirstLeft->Low == W.GE &&
12295       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
12296     LeftMBB = FirstLeft->MBB;
12297   } else {
12298     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12299     FuncInfo.MF->insert(BBI, LeftMBB);
12300     WorkList.push_back(
12301         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
12302     // Put Cond in a virtual register to make it available from the new blocks.
12303     ExportFromCurrentBlock(Cond);
12304   }
12305 
12306   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
12307   // single cluster, RHS.Low == Pivot, and we can branch to its destination
12308   // directly if RHS.High equals the current upper bound.
12309   MachineBasicBlock *RightMBB;
12310   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
12311       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
12312     RightMBB = FirstRight->MBB;
12313   } else {
12314     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12315     FuncInfo.MF->insert(BBI, RightMBB);
12316     WorkList.push_back(
12317         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
12318     // Put Cond in a virtual register to make it available from the new blocks.
12319     ExportFromCurrentBlock(Cond);
12320   }
12321 
12322   // Create the CaseBlock record that will be used to lower the branch.
12323   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
12324                getCurSDLoc(), LeftProb, RightProb);
12325 
12326   if (W.MBB == SwitchMBB)
12327     visitSwitchCase(CB, SwitchMBB);
12328   else
12329     SL->SwitchCases.push_back(CB);
12330 }
12331 
12332 // Scale CaseProb after peeling a case with the probability of PeeledCaseProb
12333 // from the switch statement.
12334 static BranchProbability scaleCaseProbability(BranchProbability CaseProb,
12335                                               BranchProbability PeeledCaseProb) {
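      // The remaining probability mass is renormalized over the complement of
      // the peeled probability, i.e. CaseProb / (1 - PeeledCaseProb), clamped
      // (via the max below) so the result never exceeds one.
      // For example, peeling a case of probability 1/2 rescales a remaining
      // 1/4 case to 1/2.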
12336   if (PeeledCaseProb == BranchProbability::getOne())
12337     return BranchProbability::getZero();
12338   BranchProbability SwitchProb = PeeledCaseProb.getCompl();
12339 
12340   uint32_t Numerator = CaseProb.getNumerator();
12341   uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
12342   return BranchProbability(Numerator, std::max(Numerator, Denominator));
12343 }
12344 
12345 // Try to peel the top probability case if it exceeds the threshold.
12346 // Return the current MachineBasicBlock for the switch statement if peeling
12347 // does not occur.
12348 // If peeling is performed, return the newly created MachineBasicBlock
12349 // for the peeled switch statement. Also update Clusters to remove the peeled
12350 // case. PeeledCaseProb is the BranchProbability for the peeled case.
12351 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
12352     const SwitchInst &SI, CaseClusterVector &Clusters,
12353     BranchProbability &PeeledCaseProb) {
12354   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
12355   // Don't peel when peeling is disabled, there is no profile info, there is
      // only one cluster, optimization is disabled, or we are optimizing for size.
12356   if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
12357       TM.getOptLevel() == CodeGenOptLevel::None ||
12358       SwitchMBB->getParent()->getFunction().hasMinSize())
12359     return SwitchMBB;
12360 
12361   BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
12362   unsigned PeeledCaseIndex = 0;
12363   bool SwitchPeeled = false;
12364   for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
12365     CaseCluster &CC = Clusters[Index];
12366     if (CC.Prob < TopCaseProb)
12367       continue;
12368     TopCaseProb = CC.Prob;
12369     PeeledCaseIndex = Index;
12370     SwitchPeeled = true;
12371   }
12372   if (!SwitchPeeled)
12373     return SwitchMBB;
12374 
12375   LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
12376                     << TopCaseProb << "\n");
12377 
12378   // Record the MBB for the peeled switch statement.
12379   MachineFunction::iterator BBI(SwitchMBB);
12380   ++BBI;
12381   MachineBasicBlock *PeeledSwitchMBB =
12382       FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
12383   FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12384 
12385   ExportFromCurrentBlock(SI.getCondition());
12386   auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12387   SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12388                           nullptr,   nullptr,      TopCaseProb.getCompl()};
12389   lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12390 
12391   Clusters.erase(PeeledCaseIt);
12392   for (CaseCluster &CC : Clusters) {
12393     LLVM_DEBUG(
12394         dbgs() << "Scale the probability for one cluster, before scaling: "
12395                << CC.Prob << "\n");
12396     CC.Prob = scaleCaseProbability(CC.Prob, TopCaseProb);
12397     LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
12398   }
12399   PeeledCaseProb = TopCaseProb;
12400   return PeeledSwitchMBB;
12401 }
12402 
12403 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
12404   // Extract cases from the switch.
12405   BranchProbabilityInfo *BPI = FuncInfo.BPI;
12406   CaseClusterVector Clusters;
12407   Clusters.reserve(SI.getNumCases());
12408   for (auto I : SI.cases()) {
12409     MachineBasicBlock *Succ = FuncInfo.getMBB(I.getCaseSuccessor());
12410     const ConstantInt *CaseVal = I.getCaseValue();
12411     BranchProbability Prob =
12412         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
12413             : BranchProbability(1, SI.getNumCases() + 1);
12414     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
12415   }
12416 
12417   MachineBasicBlock *DefaultMBB = FuncInfo.getMBB(SI.getDefaultDest());
12418 
12419   // Cluster adjacent cases with the same destination. We do this at all
12420   // optimization levels because it's cheap to do and will make codegen faster
12421   // if there are many clusters.
12422   sortAndRangeify(Clusters);
12423 
12424   // The branch probability of the peeled case.
12425   BranchProbability PeeledCaseProb = BranchProbability::getZero();
12426   MachineBasicBlock *PeeledSwitchMBB =
12427       peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12428 
12429   // If there is only the default destination, jump there directly.
12430   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
12431   if (Clusters.empty()) {
12432     assert(PeeledSwitchMBB == SwitchMBB);
12433     SwitchMBB->addSuccessor(DefaultMBB);
12434     if (DefaultMBB != NextBlock(SwitchMBB)) {
12435       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
12436                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
12437     }
12438     return;
12439   }
12440 
12441   SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
12442                      DAG.getBFI());
12443   SL->findBitTestClusters(Clusters, &SI);
12444 
12445   LLVM_DEBUG({
12446     dbgs() << "Case clusters: ";
12447     for (const CaseCluster &C : Clusters) {
12448       if (C.Kind == CC_JumpTable)
12449         dbgs() << "JT:";
12450       if (C.Kind == CC_BitTests)
12451         dbgs() << "BT:";
12452 
12453       C.Low->getValue().print(dbgs(), true);
12454       if (C.Low != C.High) {
12455         dbgs() << '-';
12456         C.High->getValue().print(dbgs(), true);
12457       }
12458       dbgs() << ' ';
12459     }
12460     dbgs() << '\n';
12461   });
12462 
12463   assert(!Clusters.empty());
12464   SwitchWorkList WorkList;
12465   CaseClusterIt First = Clusters.begin();
12466   CaseClusterIt Last = Clusters.end() - 1;
12467   auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12468   // Scale the branch probability for DefaultMBB if peeling occurred and
12469   // DefaultMBB is not replaced.
12470   if (PeeledCaseProb != BranchProbability::getZero() &&
12471       DefaultMBB == FuncInfo.getMBB(SI.getDefaultDest()))
12472     DefaultProb = scaleCaseProbability(DefaultProb, PeeledCaseProb);
12473   WorkList.push_back(
12474       {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
12475 
12476   while (!WorkList.empty()) {
12477     SwitchWorkListItem W = WorkList.pop_back_val();
12478     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
12479 
12480     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
12481         !DefaultMBB->getParent()->getFunction().hasMinSize()) {
12482       // For optimized builds, lower large range as a balanced binary tree.
12483       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
12484       continue;
12485     }
12486 
12487     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
12488   }
12489 }
12490 
12491 void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
12492   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12493   auto DL = getCurSDLoc();
12494   EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12495   setValue(&I, DAG.getStepVector(DL, ResultVT));
12496 }
12497 
12498 void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
12499   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12500   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12501 
12502   SDLoc DL = getCurSDLoc();
12503   SDValue V = getValue(I.getOperand(0));
12504   assert(VT == V.getValueType() && "Malformed vector.reverse!");
12505 
12506   if (VT.isScalableVector()) {
12507     setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
12508     return;
12509   }
12510 
12511   // Use VECTOR_SHUFFLE for the fixed-length vector
12512   // to maintain existing behavior.
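      // For example (illustrative), a 4-element reverse uses the shuffle mask
      // <3, 2, 1, 0>.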
12513   SmallVector<int, 8> Mask;
12514   unsigned NumElts = VT.getVectorMinNumElements();
12515   for (unsigned i = 0; i != NumElts; ++i)
12516     Mask.push_back(NumElts - 1 - i);
12517 
12518   setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
12519 }
12520 
12521 void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I) {
12522   auto DL = getCurSDLoc();
12523   SDValue InVec = getValue(I.getOperand(0));
12524   EVT OutVT =
12525       InVec.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
12526 
12527   unsigned OutNumElts = OutVT.getVectorMinNumElements();
12528 
12529   // ISD Node needs the input vectors split into two equal parts
12530   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
12531                            DAG.getVectorIdxConstant(0, DL));
12532   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
12533                            DAG.getVectorIdxConstant(OutNumElts, DL));
12534 
12535   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
12536   // legalisation and combines.
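      // For example (illustrative), with OutNumElts == 4 the even-element mask
      // is <0, 2, 4, 6> and the odd-element mask is <1, 3, 5, 7>.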
12537   if (OutVT.isFixedLengthVector()) {
12538     SDValue Even = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
12539                                         createStrideMask(0, 2, OutNumElts));
12540     SDValue Odd = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
12541                                        createStrideMask(1, 2, OutNumElts));
12542     SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
12543     setValue(&I, Res);
12544     return;
12545   }
12546 
12547   SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
12548                             DAG.getVTList(OutVT, OutVT), Lo, Hi);
12549   setValue(&I, Res);
12550 }
12551 
12552 void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I) {
12553   auto DL = getCurSDLoc();
12554   EVT InVT = getValue(I.getOperand(0)).getValueType();
12555   SDValue InVec0 = getValue(I.getOperand(0));
12556   SDValue InVec1 = getValue(I.getOperand(1));
12557   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12558   EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12559 
12560   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
12561   // legalisation and combines.
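      // For example (illustrative), with NumElts == 4 the interleave mask is
      // <0, 4, 1, 5, 2, 6, 3, 7>.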
12562   if (OutVT.isFixedLengthVector()) {
12563     unsigned NumElts = InVT.getVectorMinNumElements();
12564     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVec0, InVec1);
12565     setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
12566                                       createInterleaveMask(NumElts, 2)));
12567     return;
12568   }
12569 
12570   SDValue Res = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
12571                             DAG.getVTList(InVT, InVT), InVec0, InVec1);
12572   Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Res.getValue(0),
12573                     Res.getValue(1));
12574   setValue(&I, Res);
12575 }
12576 
12577 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
12578   SmallVector<EVT, 4> ValueVTs;
12579   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
12580                   ValueVTs);
12581   unsigned NumValues = ValueVTs.size();
12582   if (NumValues == 0) return;
12583 
12584   SmallVector<SDValue, 4> Values(NumValues);
12585   SDValue Op = getValue(I.getOperand(0));
12586 
12587   for (unsigned i = 0; i != NumValues; ++i)
12588     Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
12589                             SDValue(Op.getNode(), Op.getResNo() + i));
12590 
12591   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12592                            DAG.getVTList(ValueVTs), Values));
12593 }
12594 
12595 void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
12596   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12597   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12598 
12599   SDLoc DL = getCurSDLoc();
12600   SDValue V1 = getValue(I.getOperand(0));
12601   SDValue V2 = getValue(I.getOperand(1));
12602   int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
12603 
12604   // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
12605   if (VT.isScalableVector()) {
12606     setValue(
12607         &I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
12608                         DAG.getSignedConstant(
12609                             Imm, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));
12610     return;
12611   }
12612 
12613   unsigned NumElts = VT.getVectorNumElements();
12614 
12615   uint64_t Idx = (NumElts + Imm) % NumElts;
12616 
12617   // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
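      // For example (illustrative), NumElts == 4 and Imm == -1 give Idx == 3,
      // producing the mask <3, 4, 5, 6>: the last element of V1 followed by
      // the first three elements of V2.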
12618   SmallVector<int, 8> Mask;
12619   for (unsigned i = 0; i < NumElts; ++i)
12620     Mask.push_back(Idx + i);
12621   setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
12622 }
12623 
// Consider the following MIR after SelectionDAG, which produces output in
// physregs in the first case or virtregs in the second case.
//
// INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
// %5:gr32 = COPY $ebx
// %6:gr32 = COPY $edx
// %1:gr32 = COPY %6:gr32
// %0:gr32 = COPY %5:gr32
//
// INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
// %1:gr32 = COPY %6:gr32
// %0:gr32 = COPY %5:gr32
//
// Given %0, we'd like to return $ebx in the first case and %5 in the second.
// Given %1, we'd like to return $edx in the first case and %6 in the second.
//
// If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
// to a single virtreg (such as %0). The remaining outputs monotonically
// increase in virtreg number from there. If a callbr has no outputs, then it
// should not have a corresponding callbr landingpad; in fact, the callbr
// landingpad would not even be able to refer to such a callbr.
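//
// For reference, MIR like the above typically comes from IR along these lines
// (illustrative only; the exact constraint string is hypothetical):
//
//   %res = callbr { i32, i32 } asm "...", "={bx},={dx},!i"()
//              to label %direct [label %indirect]
//
// where %0 and %1 above are the virtregs holding the two extracted results.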
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) {
  MachineInstr *MI = MRI.def_begin(Reg)->getParent();
  // There is definitely at least one copy.
  assert(MI->getOpcode() == TargetOpcode::COPY &&
         "start of copy chain MUST be COPY");
  Reg = MI->getOperand(1).getReg();
  MI = MRI.def_begin(Reg)->getParent();
  // There may be an optional second copy.
  if (MI->getOpcode() == TargetOpcode::COPY) {
    assert(Reg.isVirtual() && "expected COPY of virtual register");
    Reg = MI->getOperand(1).getReg();
    assert(Reg.isPhysical() && "expected COPY of physical register");
    MI = MRI.def_begin(Reg)->getParent();
  }
  // Walking the copies back must end at the INLINEASM_BR that defines the
  // register.
  assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
         "end of copy chain MUST be INLINEASM_BR");
  return Reg;
}

// We must do this walk rather than the simpler
//   setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
// otherwise we would end up with copies of virtregs that are valid only along
// the direct edges.
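// Instead, walk back to the INLINEASM_BR's original defs so that the values
// read here are also valid along the indirect edge into this landing pad.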
void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
  SmallVector<EVT, 8> ResultVTs;
  SmallVector<SDValue, 8> ResultValues;
  const auto *CBR =
      cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();

  unsigned InitialDef = FuncInfo.ValueMap[CBR];
  SDValue Chain = DAG.getRoot();

  // Re-parse the asm constraints string.
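  // They were already parsed when the callbr itself was visited; re-parsing
  // recovers, per output, how many registers were assigned, so the matching
  // defs of the INLINEASM_BR can be located and reused.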
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
  for (auto &T : TargetConstraints) {
    SDISelAsmOperandInfo OpInfo(T);
    if (OpInfo.Type != InlineAsm::isOutput)
      continue;

    // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the
    // individual constraint.
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      // Fill in OpInfo.AssignedRegs.Regs.
      getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);

      // getRegistersForValue may produce one or more registers, depending on
      // whether OpInfo.ConstraintVT is legal on the target.
      for (Register &Reg : OpInfo.AssignedRegs.Regs) {
        Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
        if (Register::isPhysicalRegister(OriginalDef))
          FuncInfo.MBB->addLiveIn(OriginalDef);
        // Update the assigned registers to use the original defs.
        Reg = OriginalDef;
      }

      SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
          DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
      ResultValues.push_back(V);
      ResultVTs.push_back(OpInfo.ConstraintVT);
      break;
    }
    case TargetLowering::C_Other: {
      SDValue Flag;
      SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
                                                  OpInfo, DAG);
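      // No register copy chain to follow for this constraint, but the output
      // still occupies a slot in the callbr's def sequence, so step InitialDef
      // past it.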
      ++InitialDef;
      ResultValues.push_back(V);
      ResultVTs.push_back(OpInfo.ConstraintVT);
      break;
    }
    default:
      break;
    }
  }
  SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                          DAG.getVTList(ResultVTs), ResultValues);
  setValue(&I, V);
}
