1 //===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements routines for translating from LLVM IR into SelectionDAG IR.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "SelectionDAGBuilder.h"
14 #include "SDNodeDbgValue.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/BitVector.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/ADT/StringRef.h"
22 #include "llvm/ADT/Twine.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/BranchProbabilityInfo.h"
25 #include "llvm/Analysis/ConstantFolding.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/TargetLibraryInfo.h"
29 #include "llvm/Analysis/TargetTransformInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/Analysis/VectorUtils.h"
32 #include "llvm/CodeGen/Analysis.h"
33 #include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
34 #include "llvm/CodeGen/CodeGenCommonISel.h"
35 #include "llvm/CodeGen/FunctionLoweringInfo.h"
36 #include "llvm/CodeGen/GCMetadata.h"
37 #include "llvm/CodeGen/ISDOpcodes.h"
38 #include "llvm/CodeGen/MachineBasicBlock.h"
39 #include "llvm/CodeGen/MachineFrameInfo.h"
40 #include "llvm/CodeGen/MachineFunction.h"
41 #include "llvm/CodeGen/MachineInstrBuilder.h"
42 #include "llvm/CodeGen/MachineInstrBundleIterator.h"
43 #include "llvm/CodeGen/MachineMemOperand.h"
44 #include "llvm/CodeGen/MachineModuleInfo.h"
45 #include "llvm/CodeGen/MachineOperand.h"
46 #include "llvm/CodeGen/MachineRegisterInfo.h"
47 #include "llvm/CodeGen/SelectionDAG.h"
48 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
49 #include "llvm/CodeGen/StackMaps.h"
50 #include "llvm/CodeGen/SwiftErrorValueTracking.h"
51 #include "llvm/CodeGen/TargetFrameLowering.h"
52 #include "llvm/CodeGen/TargetInstrInfo.h"
53 #include "llvm/CodeGen/TargetOpcodes.h"
54 #include "llvm/CodeGen/TargetRegisterInfo.h"
55 #include "llvm/CodeGen/TargetSubtargetInfo.h"
56 #include "llvm/CodeGen/WinEHFuncInfo.h"
57 #include "llvm/IR/Argument.h"
58 #include "llvm/IR/Attributes.h"
59 #include "llvm/IR/BasicBlock.h"
60 #include "llvm/IR/CFG.h"
61 #include "llvm/IR/CallingConv.h"
62 #include "llvm/IR/Constant.h"
63 #include "llvm/IR/ConstantRange.h"
64 #include "llvm/IR/Constants.h"
65 #include "llvm/IR/DataLayout.h"
66 #include "llvm/IR/DebugInfo.h"
67 #include "llvm/IR/DebugInfoMetadata.h"
68 #include "llvm/IR/DerivedTypes.h"
69 #include "llvm/IR/DiagnosticInfo.h"
70 #include "llvm/IR/EHPersonalities.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GetElementPtrTypeIterator.h"
73 #include "llvm/IR/InlineAsm.h"
74 #include "llvm/IR/InstrTypes.h"
75 #include "llvm/IR/Instructions.h"
76 #include "llvm/IR/IntrinsicInst.h"
77 #include "llvm/IR/Intrinsics.h"
78 #include "llvm/IR/IntrinsicsAArch64.h"
79 #include "llvm/IR/IntrinsicsAMDGPU.h"
80 #include "llvm/IR/IntrinsicsWebAssembly.h"
81 #include "llvm/IR/LLVMContext.h"
82 #include "llvm/IR/MemoryModelRelaxationAnnotations.h"
83 #include "llvm/IR/Metadata.h"
84 #include "llvm/IR/Module.h"
85 #include "llvm/IR/Operator.h"
86 #include "llvm/IR/PatternMatch.h"
87 #include "llvm/IR/Statepoint.h"
88 #include "llvm/IR/Type.h"
89 #include "llvm/IR/User.h"
90 #include "llvm/IR/Value.h"
91 #include "llvm/MC/MCContext.h"
92 #include "llvm/Support/AtomicOrdering.h"
93 #include "llvm/Support/Casting.h"
94 #include "llvm/Support/CommandLine.h"
95 #include "llvm/Support/Compiler.h"
96 #include "llvm/Support/Debug.h"
97 #include "llvm/Support/InstructionCost.h"
98 #include "llvm/Support/MathExtras.h"
99 #include "llvm/Support/raw_ostream.h"
100 #include "llvm/Target/TargetIntrinsicInfo.h"
101 #include "llvm/Target/TargetMachine.h"
102 #include "llvm/Target/TargetOptions.h"
103 #include "llvm/TargetParser/Triple.h"
104 #include "llvm/Transforms/Utils/Local.h"
105 #include <cstddef>
106 #include <limits>
107 #include <optional>
108 #include <tuple>
109 
110 using namespace llvm;
111 using namespace PatternMatch;
112 using namespace SwitchCG;
113 
114 #define DEBUG_TYPE "isel"
115 
116 /// LimitFloatPrecision - Generate low-precision inline sequences for
117 /// some float libcalls (6, 8 or 12 bits).
118 static unsigned LimitFloatPrecision;
119 
120 static cl::opt<bool>
121     InsertAssertAlign("insert-assert-align", cl::init(true),
122                       cl::desc("Insert the experimental `assertalign` node."),
123                       cl::ReallyHidden);
124 
125 static cl::opt<unsigned, true>
126     LimitFPPrecision("limit-float-precision",
127                      cl::desc("Generate low-precision inline sequences "
128                               "for some float libcalls"),
129                      cl::location(LimitFloatPrecision), cl::Hidden,
130                      cl::init(0));
131 
132 static cl::opt<unsigned> SwitchPeelThreshold(
133     "switch-peel-threshold", cl::Hidden, cl::init(66),
134     cl::desc("Set the case probability threshold for peeling the case from a "
135              "switch statement. A value greater than 100 will void this "
136              "optimization"));
137 
138 // Limit the width of DAG chains. This is important in general to prevent
139 // DAG-based analysis from blowing up. For example, alias analysis and
140 // load clustering may not complete in reasonable time. It is difficult to
141 // recognize and avoid this situation within each individual analysis, and
142 // future analyses are likely to have the same behavior. Limiting DAG width is
143 // the safe approach and will be especially important with global DAGs.
144 //
145 // MaxParallelChains default is arbitrarily high to avoid affecting
146 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
147 // sequence over this should have been converted to llvm.memcpy by the
148 // frontend. It is easy to induce this behavior with .ll code such as:
149 // %buffer = alloca [4096 x i8]
150 // %data = load [4096 x i8], ptr %argPtr
151 // store [4096 x i8] %data, ptr %buffer
152 static const unsigned MaxParallelChains = 64;
153 
154 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
155                                       const SDValue *Parts, unsigned NumParts,
156                                       MVT PartVT, EVT ValueVT, const Value *V,
157                                       SDValue InChain,
158                                       std::optional<CallingConv::ID> CC);
159 
160 /// getCopyFromParts - Create a value that contains the specified legal parts
161 /// combined into the value they represent.  If the parts combine to a type
162 /// larger than ValueVT then AssertOp can be used to specify whether the extra
163 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
164 /// (ISD::AssertSext).
165 static SDValue
166 getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
167                  unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
168                  SDValue InChain,
169                  std::optional<CallingConv::ID> CC = std::nullopt,
170                  std::optional<ISD::NodeType> AssertOp = std::nullopt) {
171   // Let the target assemble the parts if it wants to
172   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
173   if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
174                                                    PartVT, ValueVT, CC))
175     return Val;
176 
177   if (ValueVT.isVector())
178     return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
179                                   InChain, CC);
180 
181   assert(NumParts > 0 && "No parts to assemble!");
182   SDValue Val = Parts[0];
183 
184   if (NumParts > 1) {
185     // Assemble the value from multiple parts.
186     if (ValueVT.isInteger()) {
187       unsigned PartBits = PartVT.getSizeInBits();
188       unsigned ValueBits = ValueVT.getSizeInBits();
189 
190       // Assemble the power of 2 part.
191       unsigned RoundParts = llvm::bit_floor(NumParts);
192       unsigned RoundBits = PartBits * RoundParts;
193       EVT RoundVT = RoundBits == ValueBits ?
194         ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
195       SDValue Lo, Hi;
196 
197       EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
198 
199       if (RoundParts > 2) {
200         Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, PartVT, HalfVT, V,
201                               InChain);
202         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, RoundParts / 2,
203                               PartVT, HalfVT, V, InChain);
204       } else {
205         Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
206         Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
207       }
208 
209       if (DAG.getDataLayout().isBigEndian())
210         std::swap(Lo, Hi);
211 
212       Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
213 
214       if (RoundParts < NumParts) {
215         // Assemble the trailing non-power-of-2 part.
216         unsigned OddParts = NumParts - RoundParts;
217         EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
218         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
219                               OddVT, V, InChain, CC);
220 
221         // Combine the round and odd parts.
222         Lo = Val;
223         if (DAG.getDataLayout().isBigEndian())
224           std::swap(Lo, Hi);
225         EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
226         Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
227         Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
228                          DAG.getConstant(Lo.getValueSizeInBits(), DL,
229                                          TLI.getShiftAmountTy(
230                                              TotalVT, DAG.getDataLayout())));
231         Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
232         Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
233       }
234     } else if (PartVT.isFloatingPoint()) {
235       // FP split into multiple FP parts (for ppcf128)
236       assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
237              "Unexpected split");
238       SDValue Lo, Hi;
239       Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
240       Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
241       if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
242         std::swap(Lo, Hi);
243       Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
244     } else {
245       // FP split into integer parts (soft fp)
246       assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
247              !PartVT.isVector() && "Unexpected split");
248       EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
249       Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V,
250                              InChain, CC);
251     }
252   }
253 
254   // There is now one part, held in Val.  Correct it to match ValueVT.
255   // PartEVT is the type of the register class that holds the value.
256   // ValueVT is the type of the inline asm operation.
257   EVT PartEVT = Val.getValueType();
258 
259   if (PartEVT == ValueVT)
260     return Val;
261 
262   if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
263       ValueVT.bitsLT(PartEVT)) {
264     // For an FP value in an integer part, we need to truncate to the right
265     // width first.
266     PartEVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
267     Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
268   }
269 
270   // Handle types that have the same size.
271   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
272     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
273 
274   // Handle types with different sizes.
275   if (PartEVT.isInteger() && ValueVT.isInteger()) {
276     if (ValueVT.bitsLT(PartEVT)) {
277       // For a truncate, see if we have any information indicating whether
278       // the bits being truncated off are known to be zero or to be the
279       // sign-extension of the remaining value.
280       if (AssertOp)
281         Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
282                           DAG.getValueType(ValueVT));
283       return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
284     }
285     return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
286   }
287 
288   if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
289     // FP_ROUNDs are always exact here.
290     if (ValueVT.bitsLT(Val.getValueType())) {
291 
292       SDValue NoChange =
293           DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
294 
295       if (DAG.getMachineFunction().getFunction().getAttributes().hasFnAttr(
296               llvm::Attribute::StrictFP)) {
297         return DAG.getNode(ISD::STRICT_FP_ROUND, DL,
298                            DAG.getVTList(ValueVT, MVT::Other), InChain, Val,
299                            NoChange);
300       }
301 
302       return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val, NoChange);
303     }
304 
305     return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
306   }
307 
308   // Handle MMX to a narrower integer type by bitcasting MMX to integer and
309   // then truncating.
310   if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
311       ValueVT.bitsLT(PartEVT)) {
312     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
313     return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
314   }
315 
316   report_fatal_error("Unknown mismatch in getCopyFromParts!");
317 }
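// As an illustrative sketch of the scalar path above: assembling an i96 value
// from three i32 parts on a little-endian target proceeds roughly as
//   Lo  = BUILD_PAIR i64, Parts[0], Parts[1]     ; the power-of-2 "round" part
//   Hi  = ANY_EXTEND i96, Parts[2]               ; the odd trailing part
//   Hi  = SHL i96, Hi, 64
//   Val = OR i96, ZERO_EXTEND(i96, Lo), Hi
// Here Val already has the requested type i96; in general the code after the
// multi-part assembly truncates, extends or bitcasts the single remaining part
// so that it matches ValueVT.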
318 
319 static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
320                                               const Twine &ErrMsg) {
321   const Instruction *I = dyn_cast_or_null<Instruction>(V);
322   if (!V)
323     return Ctx.emitError(ErrMsg);
324 
325   const char *AsmError = ", possible invalid constraint for vector type";
326   if (const CallInst *CI = dyn_cast<CallInst>(I))
327     if (CI->isInlineAsm())
328       return Ctx.emitError(I, ErrMsg + AsmError);
329 
330   return Ctx.emitError(I, ErrMsg);
331 }
332 
333 /// getCopyFromPartsVector - Create a value that contains the specified legal
334 /// parts combined into the value they represent.  If the parts combine to a
335 /// type larger than ValueVT then AssertOp can be used to specify whether the
336 /// extra bits are known to be zero (ISD::AssertZext) or sign extended from
337 /// ValueVT (ISD::AssertSext).
338 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
339                                       const SDValue *Parts, unsigned NumParts,
340                                       MVT PartVT, EVT ValueVT, const Value *V,
341                                       SDValue InChain,
342                                       std::optional<CallingConv::ID> CallConv) {
343   assert(ValueVT.isVector() && "Not a vector value");
344   assert(NumParts > 0 && "No parts to assemble!");
345   const bool IsABIRegCopy = CallConv.has_value();
346 
347   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
348   SDValue Val = Parts[0];
349 
350   // Handle a multi-element vector.
351   if (NumParts > 1) {
352     EVT IntermediateVT;
353     MVT RegisterVT;
354     unsigned NumIntermediates;
355     unsigned NumRegs;
356 
357     if (IsABIRegCopy) {
358       NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
359           *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
360           NumIntermediates, RegisterVT);
361     } else {
362       NumRegs =
363           TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
364                                      NumIntermediates, RegisterVT);
365     }
366 
367     assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
368     NumParts = NumRegs; // Silence a compiler warning.
369     assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
370     assert(RegisterVT.getSizeInBits() ==
371            Parts[0].getSimpleValueType().getSizeInBits() &&
372            "Part type sizes don't match!");
373 
374     // Assemble the parts into intermediate operands.
375     SmallVector<SDValue, 8> Ops(NumIntermediates);
376     if (NumIntermediates == NumParts) {
377       // If the register was not expanded, truncate or copy the value,
378       // as appropriate.
379       for (unsigned i = 0; i != NumParts; ++i)
380         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1, PartVT, IntermediateVT,
381                                   V, InChain, CallConv);
382     } else if (NumParts > 0) {
383       // If the intermediate type was expanded, build the intermediate
384       // operands from the parts.
385       assert(NumParts % NumIntermediates == 0 &&
386              "Must expand into a divisible number of parts!");
387       unsigned Factor = NumParts / NumIntermediates;
388       for (unsigned i = 0; i != NumIntermediates; ++i)
389         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor, PartVT,
390                                   IntermediateVT, V, InChain, CallConv);
391     }
392 
393     // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
394     // intermediate operands.
395     EVT BuiltVectorTy =
396         IntermediateVT.isVector()
397             ? EVT::getVectorVT(
398                   *DAG.getContext(), IntermediateVT.getScalarType(),
399                   IntermediateVT.getVectorElementCount() * NumParts)
400             : EVT::getVectorVT(*DAG.getContext(),
401                                IntermediateVT.getScalarType(),
402                                NumIntermediates);
403     Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
404                                                 : ISD::BUILD_VECTOR,
405                       DL, BuiltVectorTy, Ops);
406   }
407 
408   // There is now one part, held in Val.  Correct it to match ValueVT.
409   EVT PartEVT = Val.getValueType();
410 
411   if (PartEVT == ValueVT)
412     return Val;
413 
414   if (PartEVT.isVector()) {
415     // Vector/Vector bitcast.
416     if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
417       return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
418 
419     // If the parts vector has more elements than the value vector, then we
420     // have a vector widening case (e.g. <2 x float> -> <4 x float>).
421     // Extract the elements we want.
422     if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
423       assert((PartEVT.getVectorElementCount().getKnownMinValue() >
424               ValueVT.getVectorElementCount().getKnownMinValue()) &&
425              (PartEVT.getVectorElementCount().isScalable() ==
426               ValueVT.getVectorElementCount().isScalable()) &&
427              "Cannot narrow, it would be a lossy transformation");
428       PartEVT =
429           EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
430                            ValueVT.getVectorElementCount());
431       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
432                         DAG.getVectorIdxConstant(0, DL));
433       if (PartEVT == ValueVT)
434         return Val;
435       if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
436         return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
437 
438       // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
439       if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
440         return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
441     }
442 
443     // Promoted vector extract
444     return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
445   }
446 
447   // Trivial bitcast if the types are the same size and the destination
448   // vector type is legal.
449   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
450       TLI.isTypeLegal(ValueVT))
451     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
452 
453   if (ValueVT.getVectorNumElements() != 1) {
454      // Certain ABIs require that vectors are passed as integers. If the
455      // vector and the integer are the same size, this is an obvious bitcast.
456      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
457        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
458      } else if (ValueVT.bitsLT(PartEVT)) {
459        const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
460        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
461        // Drop the extra bits.
462        Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
463        return DAG.getBitcast(ValueVT, Val);
464      }
465 
466      diagnosePossiblyInvalidConstraint(
467          *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
468      return DAG.getUNDEF(ValueVT);
469   }
470 
471   // Handle cases such as i8 -> <1 x i1>
472   EVT ValueSVT = ValueVT.getVectorElementType();
473   if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
474     unsigned ValueSize = ValueSVT.getSizeInBits();
475     if (ValueSize == PartEVT.getSizeInBits()) {
476       Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
477     } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
478       // It's possible a scalar floating point type gets softened to integer and
479       // then promoted to a larger integer. If PartEVT is the larger integer
480       // we need to truncate it and then bitcast to the FP type.
481       assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
482       EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
483       Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
484       Val = DAG.getBitcast(ValueSVT, Val);
485     } else {
486       Val = ValueVT.isFloatingPoint()
487                 ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
488                 : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
489     }
490   }
491 
492   return DAG.getBuildVector(ValueVT, DL, Val);
493 }
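// For example, assuming a target where a <4 x i32> value is passed in two
// v2i32 registers, the breakdown above yields NumIntermediates == NumParts == 2
// with IntermediateVT == v2i32, and the value is reassembled as
//   Val = CONCAT_VECTORS v4i32, Parts[0], Parts[1]
// which already matches ValueVT, so no further conversion is needed.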
494 
495 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
496                                  SDValue Val, SDValue *Parts, unsigned NumParts,
497                                  MVT PartVT, const Value *V,
498                                  std::optional<CallingConv::ID> CallConv);
499 
500 /// getCopyToParts - Create a series of nodes that contain the specified value
501 /// split into legal parts.  If the parts contain more bits than Val, then, for
502 /// integers, ExtendKind can be used to specify how to generate the extra bits.
503 static void
504 getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
505                unsigned NumParts, MVT PartVT, const Value *V,
506                std::optional<CallingConv::ID> CallConv = std::nullopt,
507                ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
508   // Let the target split the parts if it wants to
509   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
510   if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
511                                       CallConv))
512     return;
513   EVT ValueVT = Val.getValueType();
514 
515   // Handle the vector case separately.
516   if (ValueVT.isVector())
517     return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
518                                 CallConv);
519 
520   unsigned OrigNumParts = NumParts;
521   assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
522          "Copying to an illegal type!");
523 
524   if (NumParts == 0)
525     return;
526 
527   assert(!ValueVT.isVector() && "Vector case handled elsewhere");
528   EVT PartEVT = PartVT;
529   if (PartEVT == ValueVT) {
530     assert(NumParts == 1 && "No-op copy with multiple parts!");
531     Parts[0] = Val;
532     return;
533   }
534 
535   unsigned PartBits = PartVT.getSizeInBits();
536   if (NumParts * PartBits > ValueVT.getSizeInBits()) {
537     // If the parts cover more bits than the value has, promote the value.
538     if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
539       assert(NumParts == 1 && "Do not know what to promote to!");
540       Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
541     } else {
542       if (ValueVT.isFloatingPoint()) {
543         // FP values need to be bitcast, then extended if they are being put
544         // into a larger container.
545         ValueVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
546         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
547       }
548       assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
549              ValueVT.isInteger() &&
550              "Unknown mismatch!");
551       ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
552       Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
553       if (PartVT == MVT::x86mmx)
554         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
555     }
556   } else if (PartBits == ValueVT.getSizeInBits()) {
557     // Different types of the same size.
558     assert(NumParts == 1 && PartEVT != ValueVT);
559     Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
560   } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
561     // If the parts cover fewer bits than the value has, truncate the value.
562     assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
563            ValueVT.isInteger() &&
564            "Unknown mismatch!");
565     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
566     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
567     if (PartVT == MVT::x86mmx)
568       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
569   }
570 
571   // The value may have changed - recompute ValueVT.
572   ValueVT = Val.getValueType();
573   assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
574          "Failed to tile the value with PartVT!");
575 
576   if (NumParts == 1) {
577     if (PartEVT != ValueVT) {
578       diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
579                                         "scalar-to-vector conversion failed");
580       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
581     }
582 
583     Parts[0] = Val;
584     return;
585   }
586 
587   // Expand the value into multiple parts.
588   if (NumParts & (NumParts - 1)) {
589     // The number of parts is not a power of 2.  Split off and copy the tail.
590     assert(PartVT.isInteger() && ValueVT.isInteger() &&
591            "Do not know what to expand to!");
592     unsigned RoundParts = llvm::bit_floor(NumParts);
593     unsigned RoundBits = RoundParts * PartBits;
594     unsigned OddParts = NumParts - RoundParts;
595     SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
596       DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));
597 
598     getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
599                    CallConv);
600 
601     if (DAG.getDataLayout().isBigEndian())
602       // The odd parts were reversed by getCopyToParts - unreverse them.
603       std::reverse(Parts + RoundParts, Parts + NumParts);
604 
605     NumParts = RoundParts;
606     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
607     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
608   }
609 
610   // The number of parts is a power of 2.  Repeatedly bisect the value using
611   // EXTRACT_ELEMENT.
612   Parts[0] = DAG.getNode(ISD::BITCAST, DL,
613                          EVT::getIntegerVT(*DAG.getContext(),
614                                            ValueVT.getSizeInBits()),
615                          Val);
616 
617   for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
618     for (unsigned i = 0; i < NumParts; i += StepSize) {
619       unsigned ThisBits = StepSize * PartBits / 2;
620       EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
621       SDValue &Part0 = Parts[i];
622       SDValue &Part1 = Parts[i+StepSize/2];
623 
624       Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
625                           ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
626       Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
627                           ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
628 
629       if (ThisBits == PartBits && ThisVT != PartVT) {
630         Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
631         Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
632       }
633     }
634   }
635 
636   if (DAG.getDataLayout().isBigEndian())
637     std::reverse(Parts, Parts + OrigNumParts);
638 }
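// As a sketch of the non-power-of-2 expansion above: splitting an i96 value
// into three i32 parts on a little-endian target looks roughly like
//   OddVal   = SRL i96, Val, 64
//   Parts[2] = TRUNCATE i32, OddVal              ; the odd tail part
//   Val      = TRUNCATE i64, Val                 ; the power-of-2 remainder
//   Parts[1] = EXTRACT_ELEMENT i32, Val, 1
//   Parts[0] = EXTRACT_ELEMENT i32, Val, 0
// On big-endian targets the parts are reversed at the end.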
639 
640 static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
641                                      const SDLoc &DL, EVT PartVT) {
642   if (!PartVT.isVector())
643     return SDValue();
644 
645   EVT ValueVT = Val.getValueType();
646   EVT PartEVT = PartVT.getVectorElementType();
647   EVT ValueEVT = ValueVT.getVectorElementType();
648   ElementCount PartNumElts = PartVT.getVectorElementCount();
649   ElementCount ValueNumElts = ValueVT.getVectorElementCount();
650 
651   // We only support widening vectors with equivalent element types and
652   // fixed/scalable properties. If a target needs to widen a fixed-length type
653   // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
654   if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
655       PartNumElts.isScalable() != ValueNumElts.isScalable())
656     return SDValue();
657 
658   // Special-case bf16 because some targets share its ABI with fp16.
659   if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
660     assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
661            "Cannot widen to illegal type");
662     Val = DAG.getNode(ISD::BITCAST, DL,
663                       ValueVT.changeVectorElementType(MVT::f16), Val);
664   } else if (PartEVT != ValueEVT) {
665     return SDValue();
666   }
667 
668   // Widening a scalable vector to another scalable vector is done by inserting
669   // the vector into a larger undef one.
670   if (PartNumElts.isScalable())
671     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
672                        Val, DAG.getVectorIdxConstant(0, DL));
673 
674   // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
675   // undef elements.
676   SmallVector<SDValue, 16> Ops;
677   DAG.ExtractVectorElements(Val, Ops);
678   SDValue EltUndef = DAG.getUNDEF(PartEVT);
679   Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
680 
681   // FIXME: Use CONCAT for 2x -> 4x.
682   return DAG.getBuildVector(PartVT, DL, Ops);
683 }
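// For example, widening a fixed <2 x float> value to a <4 x float> part type
// extracts the two elements and appends two undef elements:
//   Val = BUILD_VECTOR v4f32, Elt0, Elt1, undef, undef
// whereas widening a scalable nxv2f32 value to an nxv4f32 part type uses
//   Val = INSERT_SUBVECTOR nxv4f32, undef, Val, 0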
684 
685 /// getCopyToPartsVector - Create a series of nodes that contain the specified
686 /// value split into legal parts.
687 static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
688                                  SDValue Val, SDValue *Parts, unsigned NumParts,
689                                  MVT PartVT, const Value *V,
690                                  std::optional<CallingConv::ID> CallConv) {
691   EVT ValueVT = Val.getValueType();
692   assert(ValueVT.isVector() && "Not a vector");
693   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
694   const bool IsABIRegCopy = CallConv.has_value();
695 
696   if (NumParts == 1) {
697     EVT PartEVT = PartVT;
698     if (PartEVT == ValueVT) {
699       // Nothing to do.
700     } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
701       // Bitconvert vector->vector case.
702       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
703     } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
704       Val = Widened;
705     } else if (PartVT.isVector() &&
706                PartEVT.getVectorElementType().bitsGE(
707                    ValueVT.getVectorElementType()) &&
708                PartEVT.getVectorElementCount() ==
709                    ValueVT.getVectorElementCount()) {
710 
711       // Promoted vector extract
712       Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
713     } else if (PartEVT.isVector() &&
714                PartEVT.getVectorElementType() !=
715                    ValueVT.getVectorElementType() &&
716                TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
717                    TargetLowering::TypeWidenVector) {
718       // Combination of widening and promotion.
719       EVT WidenVT =
720           EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
721                            PartVT.getVectorElementCount());
722       SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
723       Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
724     } else {
725       // Don't extract an integer from a float vector. This can happen if the
726       // FP type gets softened to integer and then promoted. The promotion
727       // prevents it from being picked up by the earlier bitcast case.
728       if (ValueVT.getVectorElementCount().isScalar() &&
729           (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
730         // If we reach this condition and PartVT is FP, this means that
731         // ValueVT is also FP and the two have different sizes; otherwise we
732         // would have bitcasted them. Producing an EXTRACT_VECTOR_ELT here
733         // would be invalid, since that would require extending the smaller FP
734         // type to the larger one.
735         if (PartVT.isFloatingPoint()) {
736           Val = DAG.getBitcast(ValueVT.getScalarType(), Val);
737           Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
738         } else
739           Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
740                             DAG.getVectorIdxConstant(0, DL));
741       } else {
742         uint64_t ValueSize = ValueVT.getFixedSizeInBits();
743         assert(PartVT.getFixedSizeInBits() > ValueSize &&
744                "lossy conversion of vector to scalar type");
745         EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
746         Val = DAG.getBitcast(IntermediateType, Val);
747         Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
748       }
749     }
750 
751     assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
752     Parts[0] = Val;
753     return;
754   }
755 
756   // Handle a multi-element vector.
757   EVT IntermediateVT;
758   MVT RegisterVT;
759   unsigned NumIntermediates;
760   unsigned NumRegs;
761   if (IsABIRegCopy) {
762     NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
763         *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
764         RegisterVT);
765   } else {
766     NumRegs =
767         TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
768                                    NumIntermediates, RegisterVT);
769   }
770 
771   assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
772   NumParts = NumRegs; // Silence a compiler warning.
773   assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
774 
775   assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
776          "Mixing scalable and fixed vectors when copying in parts");
777 
778   std::optional<ElementCount> DestEltCnt;
779 
780   if (IntermediateVT.isVector())
781     DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
782   else
783     DestEltCnt = ElementCount::getFixed(NumIntermediates);
784 
785   EVT BuiltVectorTy = EVT::getVectorVT(
786       *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);
787 
788   if (ValueVT == BuiltVectorTy) {
789     // Nothing to do.
790   } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
791     // Bitconvert vector->vector case.
792     Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
793   } else {
794     if (BuiltVectorTy.getVectorElementType().bitsGT(
795             ValueVT.getVectorElementType())) {
796       // Integer promotion.
797       ValueVT = EVT::getVectorVT(*DAG.getContext(),
798                                  BuiltVectorTy.getVectorElementType(),
799                                  ValueVT.getVectorElementCount());
800       Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
801     }
802 
803     if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
804       Val = Widened;
805     }
806   }
807 
808   assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");
809 
810   // Split the vector into intermediate operands.
811   SmallVector<SDValue, 8> Ops(NumIntermediates);
812   for (unsigned i = 0; i != NumIntermediates; ++i) {
813     if (IntermediateVT.isVector()) {
814       // This does something sensible for scalable vectors - see the
815       // definition of EXTRACT_SUBVECTOR for further details.
816       unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
817       Ops[i] =
818           DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
819                       DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
820     } else {
821       Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
822                            DAG.getVectorIdxConstant(i, DL));
823     }
824   }
825 
826   // Split the intermediate operands into legal parts.
827   if (NumParts == NumIntermediates) {
828     // If the register was not expanded, promote or copy the value,
829     // as appropriate.
830     for (unsigned i = 0; i != NumParts; ++i)
831       getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
832   } else if (NumParts > 0) {
833     // If the intermediate type was expanded, split each intermediate
834     // operand into legal parts.
835     assert(NumIntermediates != 0 && "division by zero");
836     assert(NumParts % NumIntermediates == 0 &&
837            "Must expand into a divisible number of parts!");
838     unsigned Factor = NumParts / NumIntermediates;
839     for (unsigned i = 0; i != NumIntermediates; ++i)
840       getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
841                      CallConv);
842   }
843 }
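// For example, again assuming a target where a <4 x i32> value is passed in
// two v2i32 registers, the loop above splits the value with
//   Ops[0] = EXTRACT_SUBVECTOR v2i32, Val, 0
//   Ops[1] = EXTRACT_SUBVECTOR v2i32, Val, 2
// and each intermediate operand is then copied into its register part.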
844 
845 RegsForValue::RegsForValue(const SmallVector<Register, 4> &regs, MVT regvt,
846                            EVT valuevt, std::optional<CallingConv::ID> CC)
847     : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
848       RegCount(1, regs.size()), CallConv(CC) {}
849 
850 RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
851                            const DataLayout &DL, Register Reg, Type *Ty,
852                            std::optional<CallingConv::ID> CC) {
853   ComputeValueVTs(TLI, DL, Ty, ValueVTs);
854 
855   CallConv = CC;
856 
857   for (EVT ValueVT : ValueVTs) {
858     unsigned NumRegs =
859         isABIMangled()
860             ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
861             : TLI.getNumRegisters(Context, ValueVT);
862     MVT RegisterVT =
863         isABIMangled()
864             ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
865             : TLI.getRegisterType(Context, ValueVT);
866     for (unsigned i = 0; i != NumRegs; ++i)
867       Regs.push_back(Reg + i);
868     RegVTs.push_back(RegisterVT);
869     RegCount.push_back(NumRegs);
870     Reg = Reg.id() + NumRegs;
871   }
872 }
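// For example, assuming a 32-bit target where getRegisterType(i64) is i32 and
// getNumRegisters(i64) is 2, constructing a RegsForValue for an i64 value
// starting at register Reg records the two consecutive registers Reg and
// Reg+1 in Regs, one entry {i32} in RegVTs and one entry {2} in RegCount; a
// struct with several members repeats this per member ValueVT, advancing Reg
// past the registers already used.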
873 
874 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
875                                       FunctionLoweringInfo &FuncInfo,
876                                       const SDLoc &dl, SDValue &Chain,
877                                       SDValue *Glue, const Value *V) const {
878   // A Value with type {} or [0 x %t] needs no registers.
879   if (ValueVTs.empty())
880     return SDValue();
881 
882   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
883 
884   // Assemble the legal parts into the final values.
885   SmallVector<SDValue, 4> Values(ValueVTs.size());
886   SmallVector<SDValue, 8> Parts;
887   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
888     // Copy the legal parts from the registers.
889     EVT ValueVT = ValueVTs[Value];
890     unsigned NumRegs = RegCount[Value];
891     MVT RegisterVT = isABIMangled()
892                          ? TLI.getRegisterTypeForCallingConv(
893                                *DAG.getContext(), *CallConv, RegVTs[Value])
894                          : RegVTs[Value];
895 
896     Parts.resize(NumRegs);
897     for (unsigned i = 0; i != NumRegs; ++i) {
898       SDValue P;
899       if (!Glue) {
900         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
901       } else {
902         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
903         *Glue = P.getValue(2);
904       }
905 
906       Chain = P.getValue(1);
907       Parts[i] = P;
908 
909       // If the source register is virtual and we know something about it,
910       // add an assert node.
911       if (!Register::isVirtualRegister(Regs[Part + i]) ||
912           !RegisterVT.isInteger())
913         continue;
914 
915       const FunctionLoweringInfo::LiveOutInfo *LOI =
916         FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
917       if (!LOI)
918         continue;
919 
920       unsigned RegSize = RegisterVT.getScalarSizeInBits();
921       unsigned NumSignBits = LOI->NumSignBits;
922       unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
923 
924       if (NumZeroBits == RegSize) {
925         // The current value is zero.
926         // Express that explicitly, as it makes it easier for
927         // optimizations to kick in.
928         Parts[i] = DAG.getConstant(0, dl, RegisterVT);
929         continue;
930       }
931 
932       // FIXME: We capture more information than the dag can represent.  For
933       // now, just use the tightest assertzext/assertsext possible.
934       bool isSExt;
935       EVT FromVT(MVT::Other);
936       if (NumZeroBits) {
937         FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
938         isSExt = false;
939       } else if (NumSignBits > 1) {
940         FromVT =
941             EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
942         isSExt = true;
943       } else {
944         continue;
945       }
946       // Add an assertion node.
947       assert(FromVT != MVT::Other);
948       Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
949                              RegisterVT, P, DAG.getValueType(FromVT));
950     }
951 
952     Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
953                                      RegisterVT, ValueVT, V, Chain, CallConv);
954     Part += NumRegs;
955     Parts.clear();
956   }
957 
958   return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
959 }
960 
961 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
962                                  const SDLoc &dl, SDValue &Chain, SDValue *Glue,
963                                  const Value *V,
964                                  ISD::NodeType PreferredExtendType) const {
965   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
966   ISD::NodeType ExtendKind = PreferredExtendType;
967 
968   // Get the list of the value's legal parts.
969   unsigned NumRegs = Regs.size();
970   SmallVector<SDValue, 8> Parts(NumRegs);
971   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
972     unsigned NumParts = RegCount[Value];
973 
974     MVT RegisterVT = isABIMangled()
975                          ? TLI.getRegisterTypeForCallingConv(
976                                *DAG.getContext(), *CallConv, RegVTs[Value])
977                          : RegVTs[Value];
978 
979     if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
980       ExtendKind = ISD::ZERO_EXTEND;
981 
982     getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
983                    NumParts, RegisterVT, V, CallConv, ExtendKind);
984     Part += NumParts;
985   }
986 
987   // Copy the parts into the registers.
988   SmallVector<SDValue, 8> Chains(NumRegs);
989   for (unsigned i = 0; i != NumRegs; ++i) {
990     SDValue Part;
991     if (!Glue) {
992       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
993     } else {
994       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
995       *Glue = Part.getValue(1);
996     }
997 
998     Chains[i] = Part.getValue(0);
999   }
1000 
1001   if (NumRegs == 1 || Glue)
1002     // If NumRegs > 1 and Glue is used, then the user of the last CopyToReg is
1003     // glued to it; that is, the CopyToReg nodes and the user are considered a
1004     // single scheduling unit. If we created a TokenFactor and returned it as
1005     // the chain, the TokenFactor would be both a predecessor (operand) of the
1006     // user as well as a successor (the TF operands are flagged to the user).
1007     // c1, f1 = CopyToReg
1008     // c2, f2 = CopyToReg
1009     // c3     = TokenFactor c1, c2
1010     // ...
1011     //        = op c3, ..., f2
1012     Chain = Chains[NumRegs-1];
1013   else
1014     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
1015 }
1016 
1017 void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
1018                                         unsigned MatchingIdx, const SDLoc &dl,
1019                                         SelectionDAG &DAG,
1020                                         std::vector<SDValue> &Ops) const {
1021   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1022 
1023   InlineAsm::Flag Flag(Code, Regs.size());
1024   if (HasMatching)
1025     Flag.setMatchingOp(MatchingIdx);
1026   else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
1027     // Put the register class of the virtual registers in the flag word.  That
1028     // way, later passes can recompute register class constraints for inline
1029     // assembly as well as normal instructions.
1030     // Don't do this for tied operands that can use the regclass information
1031     // from the def.
1032     const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
1033     const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
1034     Flag.setRegClass(RC->getID());
1035   }
1036 
1037   SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
1038   Ops.push_back(Res);
1039 
1040   if (Code == InlineAsm::Kind::Clobber) {
1041     // Clobbers should always have a 1:1 mapping with registers, and may
1042     // reference registers that have illegal (e.g. vector) types. Hence, we
1043     // shouldn't try to apply any sort of splitting logic to them.
1044     assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
1045            "No 1:1 mapping from clobbers to regs?");
1046     Register SP = TLI.getStackPointerRegisterToSaveRestore();
1047     (void)SP;
1048     for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
1049       Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
1050       assert(
1051           (Regs[I] != SP ||
1052            DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
1053           "If we clobbered the stack pointer, MFI should know about it.");
1054     }
1055     return;
1056   }
1057 
1058   for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
1059     MVT RegisterVT = RegVTs[Value];
1060     unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
1061                                            RegisterVT);
1062     for (unsigned i = 0; i != NumRegs; ++i) {
1063       assert(Reg < Regs.size() && "Mismatch in # registers expected");
1064       unsigned TheReg = Regs[Reg++];
1065       Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
1066     }
1067   }
1068 }
1069 
1070 SmallVector<std::pair<Register, TypeSize>, 4>
1071 RegsForValue::getRegsAndSizes() const {
1072   SmallVector<std::pair<Register, TypeSize>, 4> OutVec;
1073   unsigned I = 0;
1074   for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
1075     unsigned RegCount = std::get<0>(CountAndVT);
1076     MVT RegisterVT = std::get<1>(CountAndVT);
1077     TypeSize RegisterSize = RegisterVT.getSizeInBits();
1078     for (unsigned E = I + RegCount; I != E; ++I)
1079       OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
1080   }
1081   return OutVec;
1082 }
1083 
1084 void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
1085                                AssumptionCache *ac,
1086                                const TargetLibraryInfo *li) {
1087   AA = aa;
1088   AC = ac;
1089   GFI = gfi;
1090   LibInfo = li;
1091   Context = DAG.getContext();
1092   LPadToCallSiteMap.clear();
1093   SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
1094   AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
1095       *DAG.getMachineFunction().getFunction().getParent());
1096 }
1097 
1098 void SelectionDAGBuilder::clear() {
1099   NodeMap.clear();
1100   UnusedArgNodeMap.clear();
1101   PendingLoads.clear();
1102   PendingExports.clear();
1103   PendingConstrainedFP.clear();
1104   PendingConstrainedFPStrict.clear();
1105   CurInst = nullptr;
1106   HasTailCall = false;
1107   SDNodeOrder = LowestSDNodeOrder;
1108   StatepointLowering.clear();
1109 }
1110 
1111 void SelectionDAGBuilder::clearDanglingDebugInfo() {
1112   DanglingDebugInfoMap.clear();
1113 }
1114 
1115 // Update DAG root to include dependencies on Pending chains.
1116 SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
1117   SDValue Root = DAG.getRoot();
1118 
1119   if (Pending.empty())
1120     return Root;
1121 
1122   // Add the current root to Pending, unless we already indirectly
1123   // depend on it.
1124   if (Root.getOpcode() != ISD::EntryToken) {
1125     unsigned i = 0, e = Pending.size();
1126     for (; i != e; ++i) {
1127       assert(Pending[i].getNode()->getNumOperands() > 1);
1128       if (Pending[i].getNode()->getOperand(0) == Root)
1129         break;  // Don't add the root if we already indirectly depend on it.
1130     }
1131 
1132     if (i == e)
1133       Pending.push_back(Root);
1134   }
1135 
1136   if (Pending.size() == 1)
1137     Root = Pending[0];
1138   else
1139     Root = DAG.getTokenFactor(getCurSDLoc(), Pending);
1140 
1141   DAG.setRoot(Root);
1142   Pending.clear();
1143   return Root;
1144 }
1145 
1146 SDValue SelectionDAGBuilder::getMemoryRoot() {
1147   return updateRoot(PendingLoads);
1148 }
1149 
1150 SDValue SelectionDAGBuilder::getRoot() {
1151   // Chain up all pending constrained intrinsics together with all
1152   // pending loads, by simply appending them to PendingLoads and
1153   // then calling getMemoryRoot().
1154   PendingLoads.reserve(PendingLoads.size() +
1155                        PendingConstrainedFP.size() +
1156                        PendingConstrainedFPStrict.size());
1157   PendingLoads.append(PendingConstrainedFP.begin(),
1158                       PendingConstrainedFP.end());
1159   PendingLoads.append(PendingConstrainedFPStrict.begin(),
1160                       PendingConstrainedFPStrict.end());
1161   PendingConstrainedFP.clear();
1162   PendingConstrainedFPStrict.clear();
1163   return getMemoryRoot();
1164 }
1165 
1166 SDValue SelectionDAGBuilder::getControlRoot() {
1167   // We need to emit pending fpexcept.strict constrained intrinsics,
1168   // so append them to the PendingExports list.
1169   PendingExports.append(PendingConstrainedFPStrict.begin(),
1170                         PendingConstrainedFPStrict.end());
1171   PendingConstrainedFPStrict.clear();
1172   return updateRoot(PendingExports);
1173 }
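// To sketch how the three roots above relate (each goes through updateRoot,
// which token-factors the pending chains into the current DAG root):
//   getMemoryRoot()  : DAG root + PendingLoads
//   getRoot()        : DAG root + PendingLoads + PendingConstrainedFP
//                      + PendingConstrainedFPStrict
//   getControlRoot() : DAG root + PendingExports + PendingConstrainedFPStrict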
1174 
1175 void SelectionDAGBuilder::handleDebugDeclare(Value *Address,
1176                                              DILocalVariable *Variable,
1177                                              DIExpression *Expression,
1178                                              DebugLoc DL) {
1179   assert(Variable && "Missing variable");
1180 
1181   // Check if address has undef value.
1182   if (!Address || isa<UndefValue>(Address) ||
1183       (Address->use_empty() && !isa<Argument>(Address))) {
1184     LLVM_DEBUG(
1185         dbgs()
1186         << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1187     return;
1188   }
1189 
1190   bool IsParameter = Variable->isParameter() || isa<Argument>(Address);
1191 
1192   SDValue &N = NodeMap[Address];
1193   if (!N.getNode() && isa<Argument>(Address))
1194     // Check unused arguments map.
1195     N = UnusedArgNodeMap[Address];
1196   SDDbgValue *SDV;
1197   if (N.getNode()) {
1198     if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
1199       Address = BCI->getOperand(0);
1200     // Parameters are handled specially.
1201     auto *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
1202     if (IsParameter && FINode) {
1203       // Byval parameter. We have a frame index at this point.
1204       SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
1205                                       /*IsIndirect*/ true, DL, SDNodeOrder);
1206     } else if (isa<Argument>(Address)) {
1207       // Address is an argument, so try to emit its dbg value using
1208       // virtual register info from the FuncInfo.ValueMap.
1209       EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
1210                                FuncArgumentDbgValueKind::Declare, N);
1211       return;
1212     } else {
1213       SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
1214                             true, DL, SDNodeOrder);
1215     }
1216     DAG.AddDbgValue(SDV, IsParameter);
1217   } else {
1218     // If Address is an argument then try to emit its dbg value using
1219     // virtual register info from the FuncInfo.ValueMap.
1220     if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
1221                                   FuncArgumentDbgValueKind::Declare, N)) {
1222       LLVM_DEBUG(dbgs() << "dbg_declare: Dropping debug info"
1223                         << " (could not emit func-arg dbg_value)\n");
1224     }
1225   }
1226 }
1227 
1228 void SelectionDAGBuilder::visitDbgInfo(const Instruction &I) {
1229   // Add SDDbgValue nodes for any var locs here. Do so before updating
1230   // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
1231   if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
1234     for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
1235          It != End; ++It) {
1236       auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1237       dropDanglingDebugInfo(Var, It->Expr);
1238       if (It->Values.isKillLocation(It->Expr)) {
1239         handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
1240         continue;
1241       }
1242       SmallVector<Value *> Values(It->Values.location_ops());
1243       if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
1244                             It->Values.hasArgList())) {
1245         SmallVector<Value *, 4> Vals(It->Values.location_ops());
1246         addDanglingDebugInfo(Vals,
1247                              FnVarLocs->getDILocalVariable(It->VariableID),
1248                              It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
1249       }
1250     }
1251   }
1252 
1253   // We must skip DbgVariableRecords if they've already been processed above as
1254   // we have just emitted the debug values resulting from assignment tracking
1255   // analysis, making any existing DbgVariableRecords redundant (and probably
1256   // less correct). We still need to process DbgLabelRecords. This does sink
1257   // DbgLabelRecords to the bottom of the group of debug records. That shouldn't
1258   // be important, as it does so deterministically, and ordering between
1259   // DbgLabelRecords and DbgVariableRecords is immaterial (other than for MIR/IR
1260   // printing).
1261   bool SkipDbgVariableRecords = DAG.getFunctionVarLocs();
1262   // Is there any debug-info attached to this instruction, in the form of
1263   // DbgRecord non-instruction debug-info records?
1264   for (DbgRecord &DR : I.getDbgRecordRange()) {
1265     if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
1266       assert(DLR->getLabel() && "Missing label");
1267       SDDbgLabel *SDV =
1268           DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1269       DAG.AddDbgLabel(SDV);
1270       continue;
1271     }
1272 
1273     if (SkipDbgVariableRecords)
1274       continue;
1275     DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
1276     DILocalVariable *Variable = DVR.getVariable();
1277     DIExpression *Expression = DVR.getExpression();
1278     dropDanglingDebugInfo(Variable, Expression);
1279 
1280     if (DVR.getType() == DbgVariableRecord::LocationType::Declare) {
1281       if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1282         continue;
1283       LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DVR
1284                         << "\n");
1285       handleDebugDeclare(DVR.getVariableLocationOp(0), Variable, Expression,
1286                          DVR.getDebugLoc());
1287       continue;
1288     }
1289 
1290     // A DbgVariableRecord with no locations is a kill location.
1291     SmallVector<Value *, 4> Values(DVR.location_ops());
1292     if (Values.empty()) {
1293       handleKillDebugValue(Variable, Expression, DVR.getDebugLoc(),
1294                            SDNodeOrder);
1295       continue;
1296     }
1297 
1298     // A DbgVariableRecord with an undef or absent location is also a kill
1299     // location.
1300     if (llvm::any_of(Values,
1301                      [](Value *V) { return !V || isa<UndefValue>(V); })) {
1302       handleKillDebugValue(Variable, Expression, DVR.getDebugLoc(),
1303                            SDNodeOrder);
1304       continue;
1305     }
1306 
1307     bool IsVariadic = DVR.hasArgList();
1308     if (!handleDebugValue(Values, Variable, Expression, DVR.getDebugLoc(),
1309                           SDNodeOrder, IsVariadic)) {
1310       addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
1311                            DVR.getDebugLoc(), SDNodeOrder);
1312     }
1313   }
1314 }
1315 
1316 void SelectionDAGBuilder::visit(const Instruction &I) {
1317   visitDbgInfo(I);
1318 
1319   // Set up outgoing PHI node register values before emitting the terminator.
1320   if (I.isTerminator()) {
1321     HandlePHINodesInSuccessorBlocks(I.getParent());
1322   }
1323 
1324   // Increase the SDNodeOrder if dealing with a non-debug instruction.
1325   if (!isa<DbgInfoIntrinsic>(I))
1326     ++SDNodeOrder;
1327 
1328   CurInst = &I;
1329 
1330   // Set inserted listener only if required.
1331   bool NodeInserted = false;
1332   std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1333   MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
1334   MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra);
1335   if (PCSectionsMD || MMRA) {
1336     InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1337         DAG, [&](SDNode *) { NodeInserted = true; });
1338   }
1339 
1340   visit(I.getOpcode(), I);
1341 
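       // If this instruction produced a value that is used in other blocks, copy
       // it into any virtual registers assigned to it now.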
1342   if (!I.isTerminator() && !HasTailCall &&
1343       !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
1344     CopyToExportRegsIfNeeded(&I);
1345 
1346   // Handle metadata.
1347   if (PCSectionsMD || MMRA) {
1348     auto It = NodeMap.find(&I);
1349     if (It != NodeMap.end()) {
1350       if (PCSectionsMD)
1351         DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1352       if (MMRA)
1353         DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1354     } else if (NodeInserted) {
1355       // This should not happen; if it does, don't let it go unnoticed so we can
1356       // fix it. Relevant visit*() function is probably missing a setValue().
1357       errs() << "warning: losing !pcsections and/or !mmra metadata ["
1358              << I.getModule()->getName() << "]\n";
1359       LLVM_DEBUG(I.dump());
1360       assert(false);
1361     }
1362   }
1363 
1364   CurInst = nullptr;
1365 }
1366 
1367 void SelectionDAGBuilder::visitPHI(const PHINode &) {
1368   llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1369 }
1370 
1371 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1372   // Note: this doesn't use InstVisitor, because it has to work with
1373   // ConstantExpr's in addition to instructions.
1374   switch (Opcode) {
1375   default: llvm_unreachable("Unknown instruction type encountered!");
1376     // Build the switch statement using the Instruction.def file.
1377 #define HANDLE_INST(NUM, OPCODE, CLASS) \
1378     case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1379 #include "llvm/IR/Instruction.def"
1380   }
1381 }
1382 
1383 static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
1384                                             DILocalVariable *Variable,
1385                                             DebugLoc DL, unsigned Order,
1386                                             SmallVectorImpl<Value *> &Values,
1387                                             DIExpression *Expression) {
1388   // For variadic dbg_values we will now insert an undef.
1389   // FIXME: We can potentially recover these!
1390   SmallVector<SDDbgOperand, 2> Locs;
1391   for (const Value *V : Values) {
1392     auto *Undef = UndefValue::get(V->getType());
1393     Locs.push_back(SDDbgOperand::fromConst(Undef));
1394   }
1395   SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
1396                                         /*IsIndirect=*/false, DL, Order,
1397                                         /*IsVariadic=*/true);
1398   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1399   return true;
1400 }
1401 
1402 void SelectionDAGBuilder::addDanglingDebugInfo(SmallVectorImpl<Value *> &Values,
1403                                                DILocalVariable *Var,
1404                                                DIExpression *Expr,
1405                                                bool IsVariadic, DebugLoc DL,
1406                                                unsigned Order) {
1407   if (IsVariadic) {
1408     handleDanglingVariadicDebugInfo(DAG, Var, DL, Order, Values, Expr);
1409     return;
1410   }
1411   // TODO: Dangling debug info will eventually either be resolved or produce
1412   // an Undef DBG_VALUE. However in the resolution case, a gap may appear
1413   // between the original dbg.value location and its resolved DBG_VALUE,
1414   // which we should ideally fill with an extra Undef DBG_VALUE.
1415   assert(Values.size() == 1);
1416   DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);
1417 }
1418 
1419 void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1420                                                 const DIExpression *Expr) {
1421   auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1422     DIVariable *DanglingVariable = DDI.getVariable();
1423     DIExpression *DanglingExpr = DDI.getExpression();
1424     if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1425       LLVM_DEBUG(dbgs() << "Dropping dangling debug info for "
1426                         << printDDI(nullptr, DDI) << "\n");
1427       return true;
1428     }
1429     return false;
1430   };
1431 
1432   for (auto &DDIMI : DanglingDebugInfoMap) {
1433     DanglingDebugInfoVector &DDIV = DDIMI.second;
1434 
1435     // If debug info is to be dropped, run it through final checks to see
1436     // whether it can be salvaged.
1437     for (auto &DDI : DDIV)
1438       if (isMatchingDbgValue(DDI))
1439         salvageUnresolvedDbgValue(DDIMI.first, DDI);
1440 
1441     erase_if(DDIV, isMatchingDbgValue);
1442   }
1443 }
1444 
1445 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1446 // generate the debug data structures now that we've seen its definition.
1447 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1448                                                    SDValue Val) {
1449   auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1450   if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1451     return;
1452 
1453   DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1454   for (auto &DDI : DDIV) {
1455     DebugLoc DL = DDI.getDebugLoc();
1456     unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1457     unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1458     DILocalVariable *Variable = DDI.getVariable();
1459     DIExpression *Expr = DDI.getExpression();
1460     assert(Variable->isValidLocationForIntrinsic(DL) &&
1461            "Expected inlined-at fields to agree");
1462     SDDbgValue *SDV;
1463     if (Val.getNode()) {
1464       // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1465       // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1466       // we couldn't resolve it directly when examining the DbgValue intrinsic
1467       // in the first place we should not be more successful here). Unless we
1468       // have some test case that proves this to be correct we should avoid
1469       // calling EmitFuncArgumentDbgValue here.
1470       if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
1471                                     FuncArgumentDbgValueKind::Value, Val)) {
1472         LLVM_DEBUG(dbgs() << "Resolve dangling debug info for "
1473                           << printDDI(V, DDI) << "\n");
1474         LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
1475         // Increase the SDNodeOrder for the DbgValue here to make sure it is
1476         // inserted after the definition of Val when emitting the instructions
1477         // after ISel. An alternative could be to teach
1478         // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1479         LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1480                    << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1481                    << ValSDNodeOrder << "\n");
1482         SDV = getDbgValue(Val, Variable, Expr, DL,
1483                           std::max(DbgSDNodeOrder, ValSDNodeOrder));
1484         DAG.AddDbgValue(SDV, false);
1485       } else
1486         LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
1487                           << printDDI(V, DDI)
1488                           << " in EmitFuncArgumentDbgValue\n");
1489     } else {
1490       LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)
1491                         << "\n");
1492       auto Undef = UndefValue::get(V->getType());
1493       auto SDV =
1494           DAG.getConstantDbgValue(Variable, Expr, Undef, DL, DbgSDNodeOrder);
1495       DAG.AddDbgValue(SDV, false);
1496     }
1497   }
1498   DDIV.clear();
1499 }
1500 
1501 void SelectionDAGBuilder::salvageUnresolvedDbgValue(const Value *V,
1502                                                     DanglingDebugInfo &DDI) {
1503   // TODO: For the variadic implementation, instead of only checking the fail
1504   // state of `handleDebugValue`, we need to know specifically which values were
1505   // invalid, so that we attempt to salvage only those values when processing
1506   // a DIArgList.
1507   const Value *OrigV = V;
1508   DILocalVariable *Var = DDI.getVariable();
1509   DIExpression *Expr = DDI.getExpression();
1510   DebugLoc DL = DDI.getDebugLoc();
1511   unsigned SDOrder = DDI.getSDNodeOrder();
1512 
1513   // Currently we consider only dbg.value intrinsics -- we tell the salvager
1514   // that DW_OP_stack_value is desired.
1515   bool StackValue = true;
1516 
1517   // Can this Value be encoded without any further work?
1518   if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
1519     return;
1520 
1521   // Attempt to salvage back through as many instructions as possible. Bail if
1522   // a non-instruction is seen, such as a constant expression or global
1523   // variable. FIXME: Further work could recover those too.
1524   while (isa<Instruction>(V)) {
1525     const Instruction &VAsInst = *cast<const Instruction>(V);
1526     // Temporary "0", awaiting real implementation.
1527     SmallVector<uint64_t, 16> Ops;
1528     SmallVector<Value *, 4> AdditionalValues;
1529     V = salvageDebugInfoImpl(const_cast<Instruction &>(VAsInst),
1530                              Expr->getNumLocationOperands(), Ops,
1531                              AdditionalValues);
1532     // If we cannot salvage any further, and haven't yet found a suitable debug
1533     // expression, bail out.
1534     if (!V)
1535       break;
1536 
1537     // TODO: If AdditionalValues isn't empty, then the salvage can only be
1538     // represented with a DBG_VALUE_LIST, so we give up. When we have support
1539     // here for variadic dbg_values, remove that condition.
1540     if (!AdditionalValues.empty())
1541       break;
1542 
1543     // New value and expr now represent this debuginfo.
1544     Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1545 
1546     // Some kind of simplification occurred: check whether the operand of the
1547     // salvaged debug expression can be encoded in this DAG.
1548     if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
1549       LLVM_DEBUG(
1550           dbgs() << "Salvaged debug location info for:\n  " << *Var << "\n"
1551                  << *OrigV << "\nBy stripping back to:\n  " << *V << "\n");
1552       return;
1553     }
1554   }
1555 
1556   // This was the final opportunity to salvage this debug information, and it
1557   // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1558   // any earlier variable location.
1559   assert(OrigV && "V shouldn't be null");
1560   auto *Undef = UndefValue::get(OrigV->getType());
1561   auto *SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1562   DAG.AddDbgValue(SDV, false);
1563   LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  "
1564                     << printDDI(OrigV, DDI) << "\n");
1565 }
1566 
1567 void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var,
1568                                                DIExpression *Expr,
1569                                                DebugLoc DbgLoc,
1570                                                unsigned Order) {
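       // A kill location terminates any earlier location range for this
       // variable. Encode it as a poison value with the expression converted to
       // an undef expression, so downstream passes treat the variable as having
       // no known location from this point on.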
1571   Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context));
1572   DIExpression *NewExpr =
1573       const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr));
1574   handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
1575                    /*IsVariadic*/ false);
1576 }
1577 
1578 bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
1579                                            DILocalVariable *Var,
1580                                            DIExpression *Expr, DebugLoc DbgLoc,
1581                                            unsigned Order, bool IsVariadic) {
1582   if (Values.empty())
1583     return true;
1584 
1585   // Filter EntryValue locations out early.
1586   if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1587     return true;
1588 
1589   SmallVector<SDDbgOperand> LocationOps;
1590   SmallVector<SDNode *> Dependencies;
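       // Build one SDDbgOperand per value. Dependencies collects SDNodes that
       // the resulting debug value must be emitted after (e.g. frame index
       // nodes), so instruction emission keeps them ordered correctly.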
1591   for (const Value *V : Values) {
1592     // Constant value.
1593     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1594         isa<ConstantPointerNull>(V)) {
1595       LocationOps.emplace_back(SDDbgOperand::fromConst(V));
1596       continue;
1597     }
1598 
1599     // Look through IntToPtr constants.
1600     if (auto *CE = dyn_cast<ConstantExpr>(V))
1601       if (CE->getOpcode() == Instruction::IntToPtr) {
1602         LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
1603         continue;
1604       }
1605 
1606     // If the Value is a frame index, we can create a FrameIndex debug value
1607     // without relying on the DAG at all.
1608     if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1609       auto SI = FuncInfo.StaticAllocaMap.find(AI);
1610       if (SI != FuncInfo.StaticAllocaMap.end()) {
1611         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1612         continue;
1613       }
1614     }
1615 
1616     // Do not use getValue() in here; we don't want to generate code at
1617     // this point if it hasn't been done yet.
1618     SDValue N = NodeMap[V];
1619     if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1620       N = UnusedArgNodeMap[V];
1621 
1622     if (N.getNode()) {
1623       // Only emit func arg dbg value for non-variadic dbg.values for now.
1624       if (!IsVariadic &&
1625           EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1626                                    FuncArgumentDbgValueKind::Value, N))
1627         return true;
1628       if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
1629         // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
1630         // describe stack slot locations.
1631         //
1632         // Consider "int x = 0; int *px = &x;". There are two kinds of
1633         // interesting debug values here after optimization:
1634         //
1635         //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
1636         //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1637         //
1638         // Both describe the direct values of their associated variables.
1639         Dependencies.push_back(N.getNode());
1640         LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1641         continue;
1642       }
1643       LocationOps.emplace_back(
1644           SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1645       continue;
1646     }
1647 
1648     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1649     // Special rules apply for the first dbg.values of parameter variables in a
1650     // function. Identify them by the fact that they reference Argument Values,
1651     // that they're parameters, and that they belong to the current function. We
1652     // need to let them dangle until they get an SDNode.
1653     bool IsParamOfFunc =
1654         isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1655     if (IsParamOfFunc)
1656       return false;
1657 
1658     // The value is not used in this block yet (or it would have an SDNode).
1659     // We still want the value to appear for the user if possible -- if it has
1660     // an associated VReg, we can refer to that instead.
1661     auto VMI = FuncInfo.ValueMap.find(V);
1662     if (VMI != FuncInfo.ValueMap.end()) {
1663       unsigned Reg = VMI->second;
1664       // If this is a PHI node, it may be split up into several MI PHI nodes
1665       // (in FunctionLoweringInfo::set).
1666       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1667                        V->getType(), std::nullopt);
1668       if (RFV.occupiesMultipleRegs()) {
1669         // FIXME: We could potentially support variadic dbg_values here.
1670         if (IsVariadic)
1671           return false;
1672         unsigned Offset = 0;
1673         unsigned BitsToDescribe = 0;
1674         if (auto VarSize = Var->getSizeInBits())
1675           BitsToDescribe = *VarSize;
1676         if (auto Fragment = Expr->getFragmentInfo())
1677           BitsToDescribe = Fragment->SizeInBits;
1678         for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
1679           // Bail out if all bits are described already.
1680           if (Offset >= BitsToDescribe)
1681             break;
1682           // TODO: handle scalable vectors.
1683           unsigned RegisterSize = RegAndSize.second;
1684           unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1685                                       ? BitsToDescribe - Offset
1686                                       : RegisterSize;
1687           auto FragmentExpr = DIExpression::createFragmentExpression(
1688               Expr, Offset, FragmentSize);
1689           if (!FragmentExpr)
1690             continue;
1691           SDDbgValue *SDV = DAG.getVRegDbgValue(
1692               Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, Order);
1693           DAG.AddDbgValue(SDV, false);
1694           Offset += RegisterSize;
1695         }
1696         return true;
1697       }
1698       // We can use simple vreg locations for variadic dbg_values as well.
1699       LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1700       continue;
1701     }
1702     // We failed to create a SDDbgOperand for V.
1703     return false;
1704   }
1705 
1706   // We have created a SDDbgOperand for each Value in Values.
1707   assert(!LocationOps.empty());
1708   SDDbgValue *SDV =
1709       DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1710                           /*IsIndirect=*/false, DbgLoc, Order, IsVariadic);
1711   DAG.AddDbgValue(SDV, /*isParameter=*/false);
1712   return true;
1713 }
1714 
1715 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1716   // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1717   for (auto &Pair : DanglingDebugInfoMap)
1718     for (auto &DDI : Pair.second)
1719       salvageUnresolvedDbgValue(const_cast<Value *>(Pair.first), DDI);
1720   clearDanglingDebugInfo();
1721 }
1722 
1723 /// getCopyFromRegs - If there was a virtual register allocated for the value V,
1724 /// emit CopyFromReg of the specified type Ty. Return an empty SDValue() otherwise.
1725 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1726   DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1727   SDValue Result;
1728 
1729   if (It != FuncInfo.ValueMap.end()) {
1730     Register InReg = It->second;
1731 
1732     RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1733                      DAG.getDataLayout(), InReg, Ty,
1734                      std::nullopt); // This is not an ABI copy.
1735     SDValue Chain = DAG.getEntryNode();
1736     Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1737                                  V);
1738     resolveDanglingDebugInfo(V, Result);
1739   }
1740 
1741   return Result;
1742 }
1743 
1744 /// getValue - Return an SDValue for the given Value.
1745 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1746   // If we already have an SDValue for this value, use it. It's important
1747   // to do this first, so that we don't create a CopyFromReg if we already
1748   // have a regular SDValue.
1749   SDValue &N = NodeMap[V];
1750   if (N.getNode()) return N;
1751 
1752   // If there's a virtual register allocated and initialized for this
1753   // value, use it.
1754   if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1755     return copyFromReg;
1756 
1757   // Otherwise create a new SDValue and remember it.
1758   SDValue Val = getValueImpl(V);
1759   NodeMap[V] = Val;
1760   resolveDanglingDebugInfo(V, Val);
1761   return Val;
1762 }
1763 
1764 /// getNonRegisterValue - Return an SDValue for the given Value, but
1765 /// don't look in FuncInfo.ValueMap for a virtual register.
1766 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1767   // If we already have an SDValue for this value, use it.
1768   SDValue &N = NodeMap[V];
1769   if (N.getNode()) {
1770     if (isIntOrFPConstant(N)) {
1771       // Remove the debug location from the node as the node is about to be used
1772       // in a location which may differ from the original debug location.  This
1773       // is relevant to Constant and ConstantFP nodes because they can appear
1774       // as constant expressions inside PHI nodes.
1775       N->setDebugLoc(DebugLoc());
1776     }
1777     return N;
1778   }
1779 
1780   // Otherwise create a new SDValue and remember it.
1781   SDValue Val = getValueImpl(V);
1782   NodeMap[V] = Val;
1783   resolveDanglingDebugInfo(V, Val);
1784   return Val;
1785 }
1786 
1787 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1788 /// Create an SDValue for the given value.
1789 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1790   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1791 
1792   if (const Constant *C = dyn_cast<Constant>(V)) {
1793     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1794 
1795     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1796       return DAG.getConstant(*CI, getCurSDLoc(), VT);
1797 
1798     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1799       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1800 
1801     if (const ConstantPtrAuth *CPA = dyn_cast<ConstantPtrAuth>(C)) {
1802       return DAG.getNode(ISD::PtrAuthGlobalAddress, getCurSDLoc(), VT,
1803                          getValue(CPA->getPointer()), getValue(CPA->getKey()),
1804                          getValue(CPA->getAddrDiscriminator()),
1805                          getValue(CPA->getDiscriminator()));
1806     }
1807 
1808     if (isa<ConstantPointerNull>(C)) {
1809       unsigned AS = V->getType()->getPointerAddressSpace();
1810       return DAG.getConstant(0, getCurSDLoc(),
1811                              TLI.getPointerTy(DAG.getDataLayout(), AS));
1812     }
1813 
1814     if (match(C, m_VScale()))
1815       return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1816 
1817     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1818       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1819 
1820     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1821       return DAG.getUNDEF(VT);
1822 
1823     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1824       visit(CE->getOpcode(), *CE);
1825       SDValue N1 = NodeMap[V];
1826       assert(N1.getNode() && "visit didn't populate the NodeMap!");
1827       return N1;
1828     }
1829 
1830     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1831       SmallVector<SDValue, 4> Constants;
1832       for (const Use &U : C->operands()) {
1833         SDNode *Val = getValue(U).getNode();
1834         // If the operand is an empty aggregate, there are no values.
1835         if (!Val) continue;
1836         // Add each leaf value from the operand to the Constants list
1837         // to form a flattened list of all the values.
1838         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1839           Constants.push_back(SDValue(Val, i));
1840       }
1841 
1842       return DAG.getMergeValues(Constants, getCurSDLoc());
1843     }
1844 
1845     if (const ConstantDataSequential *CDS =
1846           dyn_cast<ConstantDataSequential>(C)) {
1847       SmallVector<SDValue, 4> Ops;
1848       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1849         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1850         // Add each leaf value from the element to the Ops list
1851         // to form a flattened list of all the values.
1852         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1853           Ops.push_back(SDValue(Val, i));
1854       }
1855 
1856       if (isa<ArrayType>(CDS->getType()))
1857         return DAG.getMergeValues(Ops, getCurSDLoc());
1858       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1859     }
1860 
1861     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1862       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1863              "Unknown struct or array constant!");
1864 
1865       SmallVector<EVT, 4> ValueVTs;
1866       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1867       unsigned NumElts = ValueVTs.size();
1868       if (NumElts == 0)
1869         return SDValue(); // empty struct
1870       SmallVector<SDValue, 4> Constants(NumElts);
1871       for (unsigned i = 0; i != NumElts; ++i) {
1872         EVT EltVT = ValueVTs[i];
1873         if (isa<UndefValue>(C))
1874           Constants[i] = DAG.getUNDEF(EltVT);
1875         else if (EltVT.isFloatingPoint())
1876           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1877         else
1878           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1879       }
1880 
1881       return DAG.getMergeValues(Constants, getCurSDLoc());
1882     }
1883 
1884     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1885       return DAG.getBlockAddress(BA, VT);
1886 
1887     if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1888       return getValue(Equiv->getGlobalValue());
1889 
1890     if (const auto *NC = dyn_cast<NoCFIValue>(C))
1891       return getValue(NC->getGlobalValue());
1892 
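     // Note: the remaining constant cases are target-specific opaque types and
     // vectors; zero values for the opaque types below are materialized by
     // bitcasting a zeroed vector of a legal type.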
1893     if (VT == MVT::aarch64svcount) {
1894       assert(C->isNullValue() && "Can only zero this target type!");
1895       return DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT,
1896                          DAG.getConstant(0, getCurSDLoc(), MVT::nxv16i1));
1897     }
1898 
1899     if (VT.isRISCVVectorTuple()) {
1900       assert(C->isNullValue() && "Can only zero this target type!");
1901       return NodeMap[V] = DAG.getNode(
1902                  ISD::BITCAST, getCurSDLoc(), VT,
1903                  DAG.getNode(
1904                      ISD::SPLAT_VECTOR, getCurSDLoc(),
1905                      EVT::getVectorVT(*DAG.getContext(), MVT::i8,
1906                                       VT.getSizeInBits().getKnownMinValue() / 8,
1907                                       true),
1908                      DAG.getConstant(0, getCurSDLoc(), MVT::getIntegerVT(8))));
1909     }
1910 
1911     VectorType *VecTy = cast<VectorType>(V->getType());
1912 
1913     // Now that we know the number and type of the elements, get that number of
1914     // elements into the Ops array based on what kind of constant it is.
1915     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1916       SmallVector<SDValue, 16> Ops;
1917       unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1918       for (unsigned i = 0; i != NumElements; ++i)
1919         Ops.push_back(getValue(CV->getOperand(i)));
1920 
1921       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1922     }
1923 
1924     if (isa<ConstantAggregateZero>(C)) {
1925       EVT EltVT =
1926           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1927 
1928       SDValue Op;
1929       if (EltVT.isFloatingPoint())
1930         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1931       else
1932         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1933 
1934       return NodeMap[V] = DAG.getSplat(VT, getCurSDLoc(), Op);
1935     }
1936 
1937     llvm_unreachable("Unknown vector constant");
1938   }
1939 
1940   // If this is a static alloca, generate it as the frame index instead of
1941   // a computation.
1942   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1943     DenseMap<const AllocaInst*, int>::iterator SI =
1944       FuncInfo.StaticAllocaMap.find(AI);
1945     if (SI != FuncInfo.StaticAllocaMap.end())
1946       return DAG.getFrameIndex(
1947           SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
1948   }
1949 
1950   // If this is an instruction which fast-isel has deferred, select it now.
1951   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1952     Register InReg = FuncInfo.InitializeRegForValue(Inst);
1953 
1954     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1955                      Inst->getType(), std::nullopt);
1956     SDValue Chain = DAG.getEntryNode();
1957     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1958   }
1959 
1960   if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
1961     return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1962 
1963   if (const auto *BB = dyn_cast<BasicBlock>(V))
1964     return DAG.getBasicBlock(FuncInfo.getMBB(BB));
1965 
1966   llvm_unreachable("Can't get register for value!");
1967 }
1968 
1969 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1970   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1971   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1972   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1973   bool IsSEH = isAsynchronousEHPersonality(Pers);
1974   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1975   if (!IsSEH)
1976     CatchPadMBB->setIsEHScopeEntry();
1977   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1978   if (IsMSVCCXX || IsCoreCLR)
1979     CatchPadMBB->setIsEHFuncletEntry();
1980 }
1981 
1982 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1983   // Update machine-CFG edge.
1984   MachineBasicBlock *TargetMBB = FuncInfo.getMBB(I.getSuccessor());
1985   FuncInfo.MBB->addSuccessor(TargetMBB);
1986   TargetMBB->setIsEHCatchretTarget(true);
1987   DAG.getMachineFunction().setHasEHCatchret(true);
1988 
1989   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1990   bool IsSEH = isAsynchronousEHPersonality(Pers);
1991   if (IsSEH) {
1992     // If this is not a fall-through branch or optimizations are switched off,
1993     // emit the branch.
1994     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1995         TM.getOptLevel() == CodeGenOptLevel::None)
1996       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1997                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1998     return;
1999   }
2000 
2001   // Figure out the funclet membership for the catchret's successor.
2002   // This will be used by the FuncletLayout pass to determine how to order the
2003   // BB's.
2004   // A 'catchret' returns to the outer scope's color.
2005   Value *ParentPad = I.getCatchSwitchParentPad();
2006   const BasicBlock *SuccessorColor;
2007   if (isa<ConstantTokenNone>(ParentPad))
2008     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
2009   else
2010     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
2011   assert(SuccessorColor && "No parent funclet for catchret!");
2012   MachineBasicBlock *SuccessorColorMBB = FuncInfo.getMBB(SuccessorColor);
2013   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
2014 
2015   // Create the terminator node.
2016   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
2017                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
2018                             DAG.getBasicBlock(SuccessorColorMBB));
2019   DAG.setRoot(Ret);
2020 }
2021 
2022 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
2023   // Don't emit any special code for the cleanuppad instruction. It just marks
2024   // the start of an EH scope/funclet.
2025   FuncInfo.MBB->setIsEHScopeEntry();
2026   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2027   if (Pers != EHPersonality::Wasm_CXX) {
2028     FuncInfo.MBB->setIsEHFuncletEntry();
2029     FuncInfo.MBB->setIsCleanupFuncletEntry();
2030   }
2031 }
2032 
2033 // In wasm EH, even though a catchpad may not catch an exception if a tag does
2034 // not match, it is OK to add only the first unwind destination catchpad to the
2035 // successors, because there will be at least one invoke instruction within the
2036 // catch scope that points to the next unwind destination, if one exists, so
2037 // CFGSort cannot mess up the BB sorting order.
2038 // (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
2039 // call within them, and catchpads only consisting of 'catch (...)' have a
2040 // '__cxa_end_catch' call within them, both of which generate invokes in case
2041 // the next unwind destination exists, i.e., the next unwind destination is not
2042 // the caller.)
2043 //
2044 // Having at most one EH pad successor is also simpler and helps later
2045 // transformations.
2046 //
2047 // For example,
2048 // current:
2049 //   invoke void @foo to ... unwind label %catch.dispatch
2050 // catch.dispatch:
2051 //   %0 = catchswitch within ... [label %catch.start] unwind label %next
2052 // catch.start:
2053 //   ...
2054 //   ... in this BB or some other child BB dominated by this BB there will be an
2055 //   invoke that points to 'next' BB as an unwind destination
2056 //
2057 // next: ; We don't need to add this to 'current' BB's successor
2058 //   ...
2059 static void findWasmUnwindDestinations(
2060     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2061     BranchProbability Prob,
2062     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2063         &UnwindDests) {
2064   while (EHPadBB) {
2065     const Instruction *Pad = EHPadBB->getFirstNonPHI();
2066     if (isa<CleanupPadInst>(Pad)) {
2067       // Stop on cleanup pads.
2068       UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2069       UnwindDests.back().first->setIsEHScopeEntry();
2070       break;
2071     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2072       // Add the catchpad handlers to the possible destinations. We don't
2073       // continue to the unwind destination of the catchswitch for wasm.
2074       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2075         UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
2076         UnwindDests.back().first->setIsEHScopeEntry();
2077       }
2078       break;
2079     } else {
2080       continue;
2081     }
2082   }
2083 }
2084 
2085 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
2086 /// many places it could ultimately go. In the IR, we have a single unwind
2087 /// destination, but in the machine CFG, we enumerate all the possible blocks.
2088 /// This function skips over imaginary basic blocks that hold catchswitch
2089 /// instructions, and finds all the "real" machine
2090 /// basic block destinations. As those destinations may not be successors of
2091 /// EHPadBB, here we also calculate the edge probability to those destinations.
2092 /// The passed-in Prob is the edge probability to EHPadBB.
2093 static void findUnwindDestinations(
2094     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2095     BranchProbability Prob,
2096     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2097         &UnwindDests) {
2098   EHPersonality Personality =
2099     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2100   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2101   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2102   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2103   bool IsSEH = isAsynchronousEHPersonality(Personality);
2104 
2105   if (IsWasmCXX) {
2106     findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
2107     assert(UnwindDests.size() <= 1 &&
2108            "There should be at most one unwind destination for wasm");
2109     return;
2110   }
2111 
2112   while (EHPadBB) {
2113     const Instruction *Pad = EHPadBB->getFirstNonPHI();
2114     BasicBlock *NewEHPadBB = nullptr;
2115     if (isa<LandingPadInst>(Pad)) {
2116       // Stop on landingpads. They are not funclets.
2117       UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2118       break;
2119     } else if (isa<CleanupPadInst>(Pad)) {
2120       // Stop on cleanup pads. Cleanups are always funclet entries for all known
2121       // personalities.
2122       UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
2123       UnwindDests.back().first->setIsEHScopeEntry();
2124       UnwindDests.back().first->setIsEHFuncletEntry();
2125       break;
2126     } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2127       // Add the catchpad handlers to the possible destinations.
2128       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2129         UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
2130         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2131         if (IsMSVCCXX || IsCoreCLR)
2132           UnwindDests.back().first->setIsEHFuncletEntry();
2133         if (!IsSEH)
2134           UnwindDests.back().first->setIsEHScopeEntry();
2135       }
2136       NewEHPadBB = CatchSwitch->getUnwindDest();
2137     } else {
2138       continue;
2139     }
2140 
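         // When continuing to an outer unwind destination, scale the running
         // probability by the edge probability from this pad to it, so nested
         // destinations receive the product along the unwind chain.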
2141     BranchProbabilityInfo *BPI = FuncInfo.BPI;
2142     if (BPI && NewEHPadBB)
2143       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2144     EHPadBB = NewEHPadBB;
2145   }
2146 }
2147 
2148 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
2149   // Update successor info.
2150   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2151   auto UnwindDest = I.getUnwindDest();
2152   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2153   BranchProbability UnwindDestProb =
2154       (BPI && UnwindDest)
2155           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
2156           : BranchProbability::getZero();
2157   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
2158   for (auto &UnwindDest : UnwindDests) {
2159     UnwindDest.first->setIsEHPad();
2160     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2161   }
2162   FuncInfo.MBB->normalizeSuccProbs();
2163 
2164   // Create the terminator node.
2165   MachineBasicBlock *CleanupPadMBB =
2166       FuncInfo.getMBB(I.getCleanupPad()->getParent());
2167   SDValue Ret = DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other,
2168                             getControlRoot(), DAG.getBasicBlock(CleanupPadMBB));
2169   DAG.setRoot(Ret);
2170 }
2171 
2172 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
2173   report_fatal_error("visitCatchSwitch not yet implemented!");
2174 }
2175 
2176 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
2177   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2178   auto &DL = DAG.getDataLayout();
2179   SDValue Chain = getControlRoot();
2180   SmallVector<ISD::OutputArg, 8> Outs;
2181   SmallVector<SDValue, 8> OutVals;
2182 
2183   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
2184   // lower
2185   //
2186   //   %val = call <ty> @llvm.experimental.deoptimize()
2187   //   ret <ty> %val
2188   //
2189   // differently.
2190   if (I.getParent()->getTerminatingDeoptimizeCall()) {
2191     LowerDeoptimizingReturn();
2192     return;
2193   }
2194 
2195   if (!FuncInfo.CanLowerReturn) {
2196     Register DemoteReg = FuncInfo.DemoteRegister;
2197     const Function *F = I.getParent()->getParent();
2198 
2199     // Emit a store of the return value through the virtual register.
2200     // Leave Outs empty so that LowerReturn won't try to load return
2201     // registers the usual way.
2202     SmallVector<EVT, 1> PtrValueVTs;
2203     ComputeValueVTs(TLI, DL,
2204                     PointerType::get(F->getContext(),
2205                                      DAG.getDataLayout().getAllocaAddrSpace()),
2206                     PtrValueVTs);
2207 
2208     SDValue RetPtr =
2209         DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVTs[0]);
2210     SDValue RetOp = getValue(I.getOperand(0));
2211 
2212     SmallVector<EVT, 4> ValueVTs, MemVTs;
2213     SmallVector<uint64_t, 4> Offsets;
2214     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2215                     &Offsets, 0);
2216     unsigned NumValues = ValueVTs.size();
2217 
2218     SmallVector<SDValue, 4> Chains(NumValues);
2219     Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2220     for (unsigned i = 0; i != NumValues; ++i) {
2221       // An aggregate return value cannot wrap around the address space, so
2222       // offsets to its parts don't wrap either.
2223       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
2224                                            TypeSize::getFixed(Offsets[i]));
2225 
2226       SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
2227       if (MemVTs[i] != ValueVTs[i])
2228         Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
2229       Chains[i] = DAG.getStore(
2230           Chain, getCurSDLoc(), Val,
2231           // FIXME: better loc info would be nice.
2232           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
2233           commonAlignment(BaseAlign, Offsets[i]));
2234     }
2235 
2236     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
2237                         MVT::Other, Chains);
2238   } else if (I.getNumOperands() != 0) {
2239     SmallVector<EVT, 4> ValueVTs;
2240     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
2241     unsigned NumValues = ValueVTs.size();
2242     if (NumValues) {
2243       SDValue RetOp = getValue(I.getOperand(0));
2244 
2245       const Function *F = I.getParent()->getParent();
2246 
2247       bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
2248           I.getOperand(0)->getType(), F->getCallingConv(),
2249           /*IsVarArg*/ false, DL);
2250 
2251       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2252       if (F->getAttributes().hasRetAttr(Attribute::SExt))
2253         ExtendKind = ISD::SIGN_EXTEND;
2254       else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2255         ExtendKind = ISD::ZERO_EXTEND;
2256 
2257       LLVMContext &Context = F->getContext();
2258       bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2259 
2260       for (unsigned j = 0; j != NumValues; ++j) {
2261         EVT VT = ValueVTs[j];
2262 
2263         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2264           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
2265 
2266         CallingConv::ID CC = F->getCallingConv();
2267 
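             // Split this value into the register-sized parts the calling
             // convention requires; each part becomes one OutputArg below.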
2268         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
2269         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
2270         SmallVector<SDValue, 4> Parts(NumParts);
2271         getCopyToParts(DAG, getCurSDLoc(),
2272                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
2273                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
2274 
2275         // 'inreg' on function refers to return value
2276         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2277         if (RetInReg)
2278           Flags.setInReg();
2279 
2280         if (I.getOperand(0)->getType()->isPointerTy()) {
2281           Flags.setPointer();
2282           Flags.setPointerAddrSpace(
2283               cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2284         }
2285 
2286         if (NeedsRegBlock) {
2287           Flags.setInConsecutiveRegs();
2288           if (j == NumValues - 1)
2289             Flags.setInConsecutiveRegsLast();
2290         }
2291 
2292         // Propagate extension type if any
2293         if (ExtendKind == ISD::SIGN_EXTEND)
2294           Flags.setSExt();
2295         else if (ExtendKind == ISD::ZERO_EXTEND)
2296           Flags.setZExt();
2297         else if (F->getAttributes().hasRetAttr(Attribute::NoExt))
2298           Flags.setNoExt();
2299 
2300         for (unsigned i = 0; i < NumParts; ++i) {
2301           Outs.push_back(ISD::OutputArg(Flags,
2302                                         Parts[i].getValueType().getSimpleVT(),
2303                                         VT, /*isfixed=*/true, 0, 0));
2304           OutVals.push_back(Parts[i]);
2305         }
2306       }
2307     }
2308   }
2309 
2310   // Push the swifterror virtual register as the last element of Outs. This
2311   // makes sure the swifterror virtual register will be returned in the
2312   // swifterror physical register.
2313   const Function *F = I.getParent()->getParent();
2314   if (TLI.supportSwiftError() &&
2315       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2316     assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2317     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2318     Flags.setSwiftError();
2319     Outs.push_back(ISD::OutputArg(
2320         Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
2321         /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
2322     // Create SDNode for the swifterror virtual register.
2323     OutVals.push_back(
2324         DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2325                             &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2326                         EVT(TLI.getPointerTy(DL))));
2327   }
2328 
2329   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2330   CallingConv::ID CallConv =
2331     DAG.getMachineFunction().getFunction().getCallingConv();
2332   Chain = DAG.getTargetLoweringInfo().LowerReturn(
2333       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2334 
2335   // Verify that the target's LowerReturn behaved as expected.
2336   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2337          "LowerReturn didn't return a valid chain!");
2338 
2339   // Update the DAG with the new chain value resulting from return lowering.
2340   DAG.setRoot(Chain);
2341 }
2342 
2343 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
2344 /// created for it, emit nodes to copy the value into the virtual
2345 /// registers.
2346 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
2347   // Skip empty types
2348   if (V->getType()->isEmptyTy())
2349     return;
2350 
2351   DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
2352   if (VMI != FuncInfo.ValueMap.end()) {
2353     assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2354            "Unused value assigned virtual registers!");
2355     CopyValueToVirtualRegister(V, VMI->second);
2356   }
2357 }
2358 
2359 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
2360 /// the current basic block, add it to ValueMap now so that we'll get a
2361 /// CopyTo/FromReg.
2362 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
2363   // No need to export constants.
2364   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2365 
2366   // Already exported?
2367   if (FuncInfo.isExportedInst(V)) return;
2368 
2369   Register Reg = FuncInfo.InitializeRegForValue(V);
2370   CopyValueToVirtualRegister(V, Reg);
2371 }
2372 
2373 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
2374                                                      const BasicBlock *FromBB) {
2375   // The operands of the setcc have to be in this block.  We don't know
2376   // how to export them from some other block.
2377   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2378     // Can export from current BB.
2379     if (VI->getParent() == FromBB)
2380       return true;
2381 
2382     // Is already exported, noop.
2383     return FuncInfo.isExportedInst(V);
2384   }
2385 
2386   // If this is an argument, we can export it if the BB is the entry block or
2387   // if it is already exported.
2388   if (isa<Argument>(V)) {
2389     if (FromBB->isEntryBlock())
2390       return true;
2391 
2392     // Otherwise, can only export this if it is already exported.
2393     return FuncInfo.isExportedInst(V);
2394   }
2395 
2396   // Otherwise, constants can always be exported.
2397   return true;
2398 }
2399 
2400 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2401 BranchProbability
2402 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2403                                         const MachineBasicBlock *Dst) const {
2404   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2405   const BasicBlock *SrcBB = Src->getBasicBlock();
2406   const BasicBlock *DstBB = Dst->getBasicBlock();
2407   if (!BPI) {
2408     // If BPI is not available, set the default probability as 1 / N, where N is
2409     // the number of successors.
2410     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2411     return BranchProbability(1, SuccSize);
2412   }
2413   return BPI->getEdgeProbability(SrcBB, DstBB);
2414 }
2415 
2416 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2417                                                MachineBasicBlock *Dst,
2418                                                BranchProbability Prob) {
2419   if (!FuncInfo.BPI)
2420     Src->addSuccessorWithoutProb(Dst);
2421   else {
2422     if (Prob.isUnknown())
2423       Prob = getEdgeProbability(Src, Dst);
2424     Src->addSuccessor(Dst, Prob);
2425   }
2426 }
2427 
2428 static bool InBlock(const Value *V, const BasicBlock *BB) {
2429   if (const Instruction *I = dyn_cast<Instruction>(V))
2430     return I->getParent() == BB;
2431   return true;
2432 }
2433 
2434 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2435 /// This function emits a branch and is used at the leaves of an OR or an
2436 /// AND operator tree.
2437 void
2438 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2439                                                   MachineBasicBlock *TBB,
2440                                                   MachineBasicBlock *FBB,
2441                                                   MachineBasicBlock *CurBB,
2442                                                   MachineBasicBlock *SwitchBB,
2443                                                   BranchProbability TProb,
2444                                                   BranchProbability FProb,
2445                                                   bool InvertCond) {
2446   const BasicBlock *BB = CurBB->getBasicBlock();
2447 
2448   // If the leaf of the tree is a comparison, merge the condition into
2449   // the caseblock.
2450   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2451     // The operands of the cmp have to be in this block.  We don't know
2452     // how to export them from some other block.  If this is the first block
2453     // of the sequence, no exporting is needed.
2454     if (CurBB == SwitchBB ||
2455         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2456          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2457       ISD::CondCode Condition;
2458       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2459         ICmpInst::Predicate Pred =
2460             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2461         Condition = getICmpCondCode(Pred);
2462       } else {
2463         const FCmpInst *FC = cast<FCmpInst>(Cond);
2464         FCmpInst::Predicate Pred =
2465             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2466         Condition = getFCmpCondCode(Pred);
2467         if (TM.Options.NoNaNsFPMath)
2468           Condition = getFCmpCodeWithoutNaN(Condition);
2469       }
2470 
2471       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2472                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2473       SL->SwitchCases.push_back(CB);
2474       return;
2475     }
2476   }
2477 
2478   // Create a CaseBlock record representing this branch.
2479   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2480   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2481                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2482   SL->SwitchCases.push_back(CB);
2483 }
2484 
2485 // Collect dependencies on V recursively. This is used for the cost analysis in
2486 // `shouldKeepJumpConditionsTogether`.
2487 static bool collectInstructionDeps(
2488     SmallMapVector<const Instruction *, bool, 8> *Deps, const Value *V,
2489     SmallMapVector<const Instruction *, bool, 8> *Necessary = nullptr,
2490     unsigned Depth = 0) {
2491   // Return false if we have an incomplete count.
2492   if (Depth >= SelectionDAG::MaxRecursionDepth)
2493     return false;
2494 
2495   auto *I = dyn_cast<Instruction>(V);
2496   if (I == nullptr)
2497     return true;
2498 
2499   if (Necessary != nullptr) {
2500     // This instruction is necessary for the other side of the condition so
2501     // don't count it.
2502     if (Necessary->contains(I))
2503       return true;
2504   }
2505 
2506   // Already added this dep.
2507   if (!Deps->try_emplace(I, false).second)
2508     return true;
2509 
2510   for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx)
2511     if (!collectInstructionDeps(Deps, I->getOperand(OpIdx), Necessary,
2512                                 Depth + 1))
2513       return false;
2514   return true;
2515 }
2516 
2517 bool SelectionDAGBuilder::shouldKeepJumpConditionsTogether(
2518     const FunctionLoweringInfo &FuncInfo, const BranchInst &I,
2519     Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs,
2520     TargetLoweringBase::CondMergingParams Params) const {
2521   if (I.getNumSuccessors() != 2)
2522     return false;
2523 
2524   if (!I.isConditional())
2525     return false;
2526 
2527   if (Params.BaseCost < 0)
2528     return false;
2529 
2530   // Baseline cost.
2531   InstructionCost CostThresh = Params.BaseCost;
2532 
2533   BranchProbabilityInfo *BPI = nullptr;
2534   if (Params.LikelyBias || Params.UnlikelyBias)
2535     BPI = FuncInfo.BPI;
2536   if (BPI != nullptr) {
2537     // See if we are either likely to get an early out or compute both lhs/rhs
2538     // of the condition.
2539     BasicBlock *IfFalse = I.getSuccessor(0);
2540     BasicBlock *IfTrue = I.getSuccessor(1);
2541 
2542     std::optional<bool> Likely;
2543     if (BPI->isEdgeHot(I.getParent(), IfTrue))
2544       Likely = true;
2545     else if (BPI->isEdgeHot(I.getParent(), IfFalse))
2546       Likely = false;
2547 
2548     if (Likely) {
2549       if (Opc == (*Likely ? Instruction::And : Instruction::Or))
2550         // Its likely we will have to compute both lhs and rhs of condition
2551         CostThresh += Params.LikelyBias;
2552       else {
2553         if (Params.UnlikelyBias < 0)
2554           return false;
2555         // Its likely we will get an early out.
2556         CostThresh -= Params.UnlikelyBias;
2557       }
2558     }
2559   }
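  // For illustration only (the actual values come from the target's
  // CondMergingParams): with BaseCost = 4, LikelyBias = 2 and UnlikelyBias = 2,
  // a branch that is likely to evaluate both sides ends up with a threshold of
  // 6, while one that is likely to take an early out ends up with 2.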
2560 
2561   if (CostThresh <= 0)
2562     return false;
2563 
  // Collect "all" instructions that the lhs condition depends on.
  // Use a map for stable iteration (to avoid the non-determinism of iterating
  // a SmallPtrSet). The `bool` value is just a dummy.
2567   SmallMapVector<const Instruction *, bool, 8> LhsDeps, RhsDeps;
2568   collectInstructionDeps(&LhsDeps, Lhs);
  // Collect "all" instructions that the rhs condition depends on, excluding
  // those that are also dependencies of the lhs. This gives us an estimate of
  // which instructions we stand to save by splitting the condition.
2572   if (!collectInstructionDeps(&RhsDeps, Rhs, &LhsDeps))
2573     return false;
  // Add the compare instruction itself unless it's a dependency of the LHS.
2575   if (const auto *RhsI = dyn_cast<Instruction>(Rhs))
2576     if (!LhsDeps.contains(RhsI))
2577       RhsDeps.try_emplace(RhsI, false);
2578 
2579   const auto &TLI = DAG.getTargetLoweringInfo();
2580   const auto &TTI =
2581       TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
2582 
2583   InstructionCost CostOfIncluding = 0;
  // See if this instruction will need to be computed regardless of whether the
  // RHS is.
2586   Value *BrCond = I.getCondition();
2587   auto ShouldCountInsn = [&RhsDeps, &BrCond](const Instruction *Ins) {
2588     for (const auto *U : Ins->users()) {
      // If a user is independent of the RHS calculation, the instruction will
      // be computed anyway, so we don't need to count it.
2590       if (auto *UIns = dyn_cast<Instruction>(U))
2591         if (UIns != BrCond && !RhsDeps.contains(UIns))
2592           return false;
2593     }
2594     return true;
2595   };
2596 
  // Prune instructions from RhsDeps that are dependencies of unrelated
  // instructions. The limit (SelectionDAG::MaxRecursionDepth) is fairly
  // arbitrary and just meant to cap how much time we spend in the pruning
  // loop. It's highly unlikely to come into effect.
2601   const unsigned MaxPruneIters = SelectionDAG::MaxRecursionDepth;
  // Stop after a certain point; leaving extra instructions in RhsDeps only
  // overestimates the cost and cannot cause incorrect behavior.
2604   for (unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2605     const Instruction *ToDrop = nullptr;
2606     for (const auto &InsPair : RhsDeps) {
2607       if (!ShouldCountInsn(InsPair.first)) {
2608         ToDrop = InsPair.first;
2609         break;
2610       }
2611     }
2612     if (ToDrop == nullptr)
2613       break;
2614     RhsDeps.erase(ToDrop);
2615   }
2616 
2617   for (const auto &InsPair : RhsDeps) {
2618     // Finally accumulate latency that we can only attribute to computing the
2619     // RHS condition. Use latency because we are essentially trying to calculate
2620     // the cost of the dependency chain.
2621     // Possible TODO: We could try to estimate ILP and make this more precise.
2622     CostOfIncluding +=
2623         TTI.getInstructionCost(InsPair.first, TargetTransformInfo::TCK_Latency);
2624 
2625     if (CostOfIncluding > CostThresh)
2626       return false;
2627   }
2628   return true;
2629 }
2630 
2631 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2632                                                MachineBasicBlock *TBB,
2633                                                MachineBasicBlock *FBB,
2634                                                MachineBasicBlock *CurBB,
2635                                                MachineBasicBlock *SwitchBB,
2636                                                Instruction::BinaryOps Opc,
2637                                                BranchProbability TProb,
2638                                                BranchProbability FProb,
2639                                                bool InvertCond) {
  // Look through a NOT operator and remember to invert the opcode and operands
  // at the next level.
2642   Value *NotCond;
2643   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2644       InBlock(NotCond, CurBB->getBasicBlock())) {
2645     FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2646                          !InvertCond);
2647     return;
2648   }
2649 
2650   const Instruction *BOp = dyn_cast<Instruction>(Cond);
2651   const Value *BOpOp0, *BOpOp1;
2652   // Compute the effective opcode for Cond, taking into account whether it needs
2653   // to be inverted, e.g.
2654   //   and (not (or A, B)), C
2655   // gets lowered as
2656   //   and (and (not A, not B), C)
2657   Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2658   if (BOp) {
2659     BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2660                ? Instruction::And
2661                : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2662                       ? Instruction::Or
2663                       : (Instruction::BinaryOps)0);
2664     if (InvertCond) {
2665       if (BOpc == Instruction::And)
2666         BOpc = Instruction::Or;
2667       else if (BOpc == Instruction::Or)
2668         BOpc = Instruction::And;
2669     }
2670   }
2671 
2672   // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have the same opcode.
2674   bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2675   if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2676       !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2677       !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2678     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2679                                  TProb, FProb, InvertCond);
2680     return;
2681   }
2682 
2683   //  Create TmpBB after CurBB.
2684   MachineFunction::iterator BBI(CurBB);
2685   MachineFunction &MF = DAG.getMachineFunction();
2686   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2687   CurBB->getParent()->insert(++BBI, TmpBB);
2688 
2689   if (Opc == Instruction::Or) {
2690     // Codegen X | Y as:
2691     // BB1:
2692     //   jmp_if_X TBB
2693     //   jmp TmpBB
2694     // TmpBB:
2695     //   jmp_if_Y TBB
2696     //   jmp FBB
2697     //
2698 
2699     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2700     // The requirement is that
2701     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2702     //     = TrueProb for original BB.
2703     // Assuming the original probabilities are A and B, one choice is to set
2704     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2705     // A/(1+B) and 2B/(1+B). This choice assumes that
2706     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2707     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2708     // TmpBB, but the math is more complicated.
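    // For example, with A = 3/4 and B = 1/4, BB1 gets probabilities 3/8 and
    // 5/8, and TmpBB gets A/(1+B) = 3/5 and 2B/(1+B) = 2/5; indeed
    // 3/8 + 5/8 * 3/5 = 3/4 = A, as required.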
2709 
2710     auto NewTrueProb = TProb / 2;
2711     auto NewFalseProb = TProb / 2 + FProb;
2712     // Emit the LHS condition.
2713     FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2714                          NewFalseProb, InvertCond);
2715 
2716     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2717     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2718     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2719     // Emit the RHS condition into TmpBB.
2720     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2721                          Probs[1], InvertCond);
2722   } else {
2723     assert(Opc == Instruction::And && "Unknown merge op!");
2724     // Codegen X & Y as:
2725     // BB1:
2726     //   jmp_if_X TmpBB
2727     //   jmp FBB
2728     // TmpBB:
2729     //   jmp_if_Y TBB
2730     //   jmp FBB
2731     //
2732     //  This requires creation of TmpBB after CurBB.
2733 
2734     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2735     // The requirement is that
2736     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2737     //     = FalseProb for original BB.
2738     // Assuming the original probabilities are A and B, one choice is to set
2739     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2740     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2741     // TrueProb for BB1 * FalseProb for TmpBB.
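    // For example, with A = 3/4 and B = 1/4, BB1 gets probabilities 7/8 and
    // 1/8, and TmpBB gets 2A/(1+A) = 6/7 and B/(1+A) = 1/7; indeed
    // 1/8 + 7/8 * 1/7 = 1/4 = B, as required.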
2742 
2743     auto NewTrueProb = TProb + FProb / 2;
2744     auto NewFalseProb = FProb / 2;
2745     // Emit the LHS condition.
2746     FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2747                          NewFalseProb, InvertCond);
2748 
2749     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2750     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2751     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2752     // Emit the RHS condition into TmpBB.
2753     FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2754                          Probs[1], InvertCond);
2755   }
2756 }
2757 
2758 /// If the set of cases should be emitted as a series of branches, return true.
2759 /// If we should emit this as a bunch of and/or'd together conditions, return
2760 /// false.
2761 bool
2762 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2763   if (Cases.size() != 2) return true;
2764 
2765   // If this is two comparisons of the same values or'd or and'd together, they
2766   // will get folded into a single comparison, so don't emit two blocks.
2767   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2768        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2769       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2770        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2771     return false;
2772   }
2773 
2774   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2775   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2776   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2777       Cases[0].CC == Cases[1].CC &&
2778       isa<Constant>(Cases[0].CmpRHS) &&
2779       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2780     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2781       return false;
2782     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2783       return false;
2784   }
2785 
2786   return true;
2787 }
2788 
2789 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2790   MachineBasicBlock *BrMBB = FuncInfo.MBB;
2791 
2792   // Update machine-CFG edges.
2793   MachineBasicBlock *Succ0MBB = FuncInfo.getMBB(I.getSuccessor(0));
2794 
2795   if (I.isUnconditional()) {
2796     // Update machine-CFG edges.
2797     BrMBB->addSuccessor(Succ0MBB);
2798 
2799     // If this is not a fall-through branch or optimizations are switched off,
2800     // emit the branch.
2801     if (Succ0MBB != NextBlock(BrMBB) ||
2802         TM.getOptLevel() == CodeGenOptLevel::None) {
2803       auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2804                             getControlRoot(), DAG.getBasicBlock(Succ0MBB));
2805       setValue(&I, Br);
2806       DAG.setRoot(Br);
2807     }
2808 
2809     return;
2810   }
2811 
2812   // If this condition is one of the special cases we handle, do special stuff
2813   // now.
2814   const Value *CondVal = I.getCondition();
2815   MachineBasicBlock *Succ1MBB = FuncInfo.getMBB(I.getSuccessor(1));
2816 
2817   // If this is a series of conditions that are or'd or and'd together, emit
2818   // this as a sequence of branches instead of setcc's with and/or operations.
2819   // As long as jumps are not expensive (exceptions for multi-use logic ops,
2820   // unpredictable branches, and vector extracts because those jumps are likely
2821   // expensive for any target), this should improve performance.
2822   // For example, instead of something like:
2823   //     cmp A, B
2824   //     C = seteq
2825   //     cmp D, E
2826   //     F = setle
2827   //     or C, F
2828   //     jnz foo
2829   // Emit:
2830   //     cmp A, B
2831   //     je foo
2832   //     cmp D, E
2833   //     jle foo
2834   bool IsUnpredictable = I.hasMetadata(LLVMContext::MD_unpredictable);
2835   const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2836   if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2837       BOp->hasOneUse() && !IsUnpredictable) {
2838     Value *Vec;
2839     const Value *BOp0, *BOp1;
2840     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2841     if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2842       Opcode = Instruction::And;
2843     else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2844       Opcode = Instruction::Or;
2845 
2846     if (Opcode &&
2847         !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2848           match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value()))) &&
2849         !shouldKeepJumpConditionsTogether(
2850             FuncInfo, I, Opcode, BOp0, BOp1,
2851             DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2852                 Opcode, BOp0, BOp1))) {
2853       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2854                            getEdgeProbability(BrMBB, Succ0MBB),
2855                            getEdgeProbability(BrMBB, Succ1MBB),
2856                            /*InvertCond=*/false);
2857       // If the compares in later blocks need to use values not currently
2858       // exported from this block, export them now.  This block should always
2859       // be the first entry.
2860       assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2861 
2862       // Allow some cases to be rejected.
2863       if (ShouldEmitAsBranches(SL->SwitchCases)) {
2864         for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2865           ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2866           ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2867         }
2868 
2869         // Emit the branch for this block.
2870         visitSwitchCase(SL->SwitchCases[0], BrMBB);
2871         SL->SwitchCases.erase(SL->SwitchCases.begin());
2872         return;
2873       }
2874 
      // Okay, we decided not to do this; remove any inserted MBBs and clear
      // SwitchCases.
2877       for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2878         FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2879 
2880       SL->SwitchCases.clear();
2881     }
2882   }
2883 
2884   // Create a CaseBlock record representing this branch.
2885   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2886                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc(),
2887                BranchProbability::getUnknown(), BranchProbability::getUnknown(),
2888                IsUnpredictable);
2889 
2890   // Use visitSwitchCase to actually insert the fast branch sequence for this
2891   // cond branch.
2892   visitSwitchCase(CB, BrMBB);
2893 }
2894 
2895 /// visitSwitchCase - Emits the necessary code to represent a single node in
2896 /// the binary search tree resulting from lowering a switch instruction.
2897 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2898                                           MachineBasicBlock *SwitchBB) {
2899   SDValue Cond;
2900   SDValue CondLHS = getValue(CB.CmpLHS);
2901   SDLoc dl = CB.DL;
2902 
2903   if (CB.CC == ISD::SETTRUE) {
2904     // Branch or fall through to TrueBB.
2905     addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2906     SwitchBB->normalizeSuccProbs();
2907     if (CB.TrueBB != NextBlock(SwitchBB)) {
2908       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2909                               DAG.getBasicBlock(CB.TrueBB)));
2910     }
2911     return;
2912   }
2913 
2914   auto &TLI = DAG.getTargetLoweringInfo();
2915   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2916 
2917   // Build the setcc now.
2918   if (!CB.CmpMHS) {
2919     // Fold "(X == true)" to X and "(X == false)" to !X to
2920     // handle common cases produced by branch lowering.
2921     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2922         CB.CC == ISD::SETEQ)
2923       Cond = CondLHS;
2924     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2925              CB.CC == ISD::SETEQ) {
2926       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2927       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2928     } else {
2929       SDValue CondRHS = getValue(CB.CmpRHS);
2930 
2931       // If a pointer's DAG type is larger than its memory type then the DAG
2932       // values are zero-extended. This breaks signed comparisons so truncate
2933       // back to the underlying type before doing the compare.
2934       if (CondLHS.getValueType() != MemVT) {
2935         CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2936         CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2937       }
2938       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2939     }
2940   } else {
2941     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2942 
2943     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2944     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2945 
2946     SDValue CmpOp = getValue(CB.CmpMHS);
2947     EVT VT = CmpOp.getValueType();
2948 
2949     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2950       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2951                           ISD::SETLE);
2952     } else {
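      // Test Low <= x <= High with a single unsigned comparison:
      // (x - Low) <=u (High - Low). Values below Low wrap around to large
      // unsigned numbers and fail the check.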
2953       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2954                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2955       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2956                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2957     }
2958   }
2959 
2960   // Update successor info
2961   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2962   // TrueBB and FalseBB are always different unless the incoming IR is
2963   // degenerate. This only happens when running llc on weird IR.
2964   if (CB.TrueBB != CB.FalseBB)
2965     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2966   SwitchBB->normalizeSuccProbs();
2967 
2968   // If the lhs block is the next block, invert the condition so that we can
2969   // fall through to the lhs instead of the rhs block.
2970   if (CB.TrueBB == NextBlock(SwitchBB)) {
2971     std::swap(CB.TrueBB, CB.FalseBB);
2972     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2973     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2974   }
2975 
2976   SDNodeFlags Flags;
2977   Flags.setUnpredictable(CB.IsUnpredictable);
2978   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, MVT::Other, getControlRoot(),
2979                                Cond, DAG.getBasicBlock(CB.TrueBB), Flags);
2980 
2981   setValue(CurInst, BrCond);
2982 
  // Insert the false branch. Do this even if it's a fall-through branch;
  // this makes it easier to do DAG optimizations which require inverting
  // the branch condition.
2986   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2987                        DAG.getBasicBlock(CB.FalseBB));
2988 
2989   DAG.setRoot(BrCond);
2990 }
2991 
2992 /// visitJumpTable - Emit JumpTable node in the current MBB
2993 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2994   // Emit the code for the jump table
2995   assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2996   assert(JT.Reg && "Should lower JT Header first!");
2997   EVT PTy = DAG.getTargetLoweringInfo().getJumpTableRegTy(DAG.getDataLayout());
2998   SDValue Index = DAG.getCopyFromReg(getControlRoot(), *JT.SL, JT.Reg, PTy);
2999   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
3000   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
3001                                     Index.getValue(1), Table, Index);
3002   DAG.setRoot(BrJumpTable);
3003 }
3004 
/// visitJumpTableHeader - This function emits the code necessary to produce
/// the index into the jump table from the value being switched on.
3007 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
3008                                                JumpTableHeader &JTH,
3009                                                MachineBasicBlock *SwitchBB) {
3010   assert(JT.SL && "Should set SDLoc for SelectionDAG!");
3011   const SDLoc &dl = *JT.SL;
3012 
3013   // Subtract the lowest switch case value from the value being switched on.
3014   SDValue SwitchOp = getValue(JTH.SValue);
3015   EVT VT = SwitchOp.getValueType();
3016   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
3017                             DAG.getConstant(JTH.First, dl, VT));
3018 
3019   // The SDNode we just created, which holds the value being switched on minus
3020   // the smallest case value, needs to be copied to a virtual register so it
3021   // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // may therefore require extension or truncation.
3024   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3025   SwitchOp =
3026       DAG.getZExtOrTrunc(Sub, dl, TLI.getJumpTableRegTy(DAG.getDataLayout()));
3027 
3028   Register JumpTableReg =
3029       FuncInfo.CreateReg(TLI.getJumpTableRegTy(DAG.getDataLayout()));
3030   SDValue CopyTo =
3031       DAG.getCopyToReg(getControlRoot(), dl, JumpTableReg, SwitchOp);
3032   JT.Reg = JumpTableReg;
3033 
3034   if (!JTH.FallthroughUnreachable) {
3035     // Emit the range check for the jump table, and branch to the default block
3036     // for the switch statement if the value being switched on exceeds the
3037     // largest case in the switch.
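    // (Value - First) >u (Last - First) is an unsigned range check that also
    // rejects values below First, since the subtraction wraps them to large
    // unsigned numbers.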
3038     SDValue CMP = DAG.getSetCC(
3039         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
3040                                    Sub.getValueType()),
3041         Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
3042 
3043     SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
3044                                  MVT::Other, CopyTo, CMP,
3045                                  DAG.getBasicBlock(JT.Default));
3046 
3047     // Avoid emitting unnecessary branches to the next block.
3048     if (JT.MBB != NextBlock(SwitchBB))
3049       BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3050                            DAG.getBasicBlock(JT.MBB));
3051 
3052     DAG.setRoot(BrCond);
3053   } else {
3054     // Avoid emitting unnecessary branches to the next block.
3055     if (JT.MBB != NextBlock(SwitchBB))
3056       DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
3057                               DAG.getBasicBlock(JT.MBB)));
3058     else
3059       DAG.setRoot(CopyTo);
3060   }
3061 }
3062 
/// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
/// variable if one exists.
3065 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
3066                                  SDValue &Chain) {
3067   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3068   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
3069   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
3070   MachineFunction &MF = DAG.getMachineFunction();
3071   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
3072   MachineSDNode *Node =
3073       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
3074   if (Global) {
3075     MachinePointerInfo MPInfo(Global);
3076     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
3077                  MachineMemOperand::MODereferenceable;
3078     MachineMemOperand *MemRef = MF.getMachineMemOperand(
3079         MPInfo, Flags, LocationSize::precise(PtrTy.getSizeInBits() / 8),
3080         DAG.getEVTAlign(PtrTy));
3081     DAG.setNodeMemRefs(Node, {MemRef});
3082   }
3083   if (PtrTy != PtrMemTy)
3084     return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
3085   return SDValue(Node, 0);
3086 }
3087 
3088 /// Codegen a new tail for a stack protector check ParentMBB which has had its
3089 /// tail spliced into a stack protector check success bb.
3090 ///
3091 /// For a high level explanation of how this fits into the stack protector
3092 /// generation see the comment on the declaration of class
3093 /// StackProtectorDescriptor.
3094 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
3095                                                   MachineBasicBlock *ParentBB) {
3096 
3097   // First create the loads to the guard/stack slot for the comparison.
3098   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3099   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
3100   EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
3101 
3102   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3103   int FI = MFI.getStackProtectorIndex();
3104 
3105   SDValue Guard;
3106   SDLoc dl = getCurSDLoc();
3107   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
3108   const Module &M = *ParentBB->getParent()->getFunction().getParent();
3109   Align Align =
3110       DAG.getDataLayout().getPrefTypeAlign(PointerType::get(M.getContext(), 0));
3111 
3112   // Generate code to load the content of the guard slot.
3113   SDValue GuardVal = DAG.getLoad(
3114       PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
3115       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
3116       MachineMemOperand::MOVolatile);
3117 
3118   if (TLI.useStackGuardXorFP())
3119     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
3120 
3121   // Retrieve guard check function, nullptr if instrumentation is inlined.
3122   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
3123     // The target provides a guard check function to validate the guard value.
3124     // Generate a call to that function with the content of the guard slot as
3125     // argument.
3126     FunctionType *FnTy = GuardCheckFn->getFunctionType();
3127     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3128 
3129     TargetLowering::ArgListTy Args;
3130     TargetLowering::ArgListEntry Entry;
3131     Entry.Node = GuardVal;
3132     Entry.Ty = FnTy->getParamType(0);
3133     if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3134       Entry.IsInReg = true;
3135     Args.push_back(Entry);
3136 
3137     TargetLowering::CallLoweringInfo CLI(DAG);
3138     CLI.setDebugLoc(getCurSDLoc())
3139         .setChain(DAG.getEntryNode())
3140         .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
3141                    getValue(GuardCheckFn), std::move(Args));
3142 
3143     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
3144     DAG.setRoot(Result.second);
3145     return;
3146   }
3147 
3148   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3149   // Otherwise, emit a volatile load to retrieve the stack guard value.
3150   SDValue Chain = DAG.getEntryNode();
3151   if (TLI.useLoadStackGuardNode(M)) {
3152     Guard = getLoadStackGuard(DAG, dl, Chain);
3153   } else {
3154     const Value *IRGuard = TLI.getSDagStackGuard(M);
3155     SDValue GuardPtr = getValue(IRGuard);
3156 
3157     Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3158                         MachinePointerInfo(IRGuard, 0), Align,
3159                         MachineMemOperand::MOVolatile);
3160   }
3161 
3162   // Perform the comparison via a getsetcc.
3163   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
3164                                                         *DAG.getContext(),
3165                                                         Guard.getValueType()),
3166                              Guard, GuardVal, ISD::SETNE);
3167 
  // If the guard value and the stack slot contents are not equal, branch to
  // the failure MBB.
3169   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
3170                                MVT::Other, GuardVal.getOperand(0),
3171                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
3172   // Otherwise branch to success MBB.
3173   SDValue Br = DAG.getNode(ISD::BR, dl,
3174                            MVT::Other, BrCond,
3175                            DAG.getBasicBlock(SPD.getSuccessMBB()));
3176 
3177   DAG.setRoot(Br);
3178 }
3179 
3180 /// Codegen the failure basic block for a stack protector check.
3181 ///
3182 /// A failure stack protector machine basic block consists simply of a call to
3183 /// __stack_chk_fail().
3184 ///
3185 /// For a high level explanation of how this fits into the stack protector
3186 /// generation see the comment on the declaration of class
3187 /// StackProtectorDescriptor.
3188 void
3189 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
3190   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3191   TargetLowering::MakeLibCallOptions CallOptions;
3192   CallOptions.setDiscardResult(true);
3193   SDValue Chain = TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL,
3194                                   MVT::isVoid, {}, CallOptions, getCurSDLoc())
3195                       .second;
3196 
3197   // Emit a trap instruction if we are required to do so.
3198   const TargetOptions &TargetOpts = DAG.getTarget().Options;
3199   if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
3200     Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
3201 
3202   DAG.setRoot(Chain);
3203 }
3204 
/// visitBitTestHeader - This function emits the code necessary to produce a
/// value suitable for "bit tests".
3207 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
3208                                              MachineBasicBlock *SwitchBB) {
3209   SDLoc dl = getCurSDLoc();
3210 
3211   // Subtract the minimum value.
3212   SDValue SwitchOp = getValue(B.SValue);
3213   EVT VT = SwitchOp.getValueType();
3214   SDValue RangeSub =
3215       DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
3216 
3217   // Determine the type of the test operands.
3218   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3219   bool UsePtrType = false;
3220   if (!TLI.isTypeLegal(VT)) {
3221     UsePtrType = true;
3222   } else {
3223     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
3224       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
        // Switch case ranges are encoded into a series of masks.
        // Just use the pointer type; it's guaranteed to fit.
3227         UsePtrType = true;
3228         break;
3229       }
3230   }
3231   SDValue Sub = RangeSub;
3232   if (UsePtrType) {
3233     VT = TLI.getPointerTy(DAG.getDataLayout());
3234     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
3235   }
3236 
3237   B.RegVT = VT.getSimpleVT();
3238   B.Reg = FuncInfo.CreateReg(B.RegVT);
3239   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
3240 
3241   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
3242 
3243   if (!B.FallthroughUnreachable)
3244     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
3245   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
3246   SwitchBB->normalizeSuccProbs();
3247 
3248   SDValue Root = CopyTo;
3249   if (!B.FallthroughUnreachable) {
3250     // Conditional branch to the default block.
3251     SDValue RangeCmp = DAG.getSetCC(dl,
3252         TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
3253                                RangeSub.getValueType()),
3254         RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
3255         ISD::SETUGT);
3256 
3257     Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3258                        DAG.getBasicBlock(B.Default));
3259   }
3260 
3261   // Avoid emitting unnecessary branches to the next block.
3262   if (MBB != NextBlock(SwitchBB))
3263     Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
3264 
3265   DAG.setRoot(Root);
3266 }
3267 
3268 /// visitBitTestCase - this function produces one "bit test"
3269 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
3270                                            MachineBasicBlock *NextMBB,
3271                                            BranchProbability BranchProbToNext,
3272                                            Register Reg, BitTestCase &B,
3273                                            MachineBasicBlock *SwitchBB) {
3274   SDLoc dl = getCurSDLoc();
3275   MVT VT = BB.RegVT;
3276   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
3277   SDValue Cmp;
3278   unsigned PopCount = llvm::popcount(B.Mask);
3279   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3280   if (PopCount == 1) {
3281     // Testing for a single bit; just compare the shift count with what it
3282     // would need to be to shift a 1 bit in that position.
3283     Cmp = DAG.getSetCC(
3284         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3285         ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
3286         ISD::SETEQ);
3287   } else if (PopCount == BB.Range) {
3288     // There is only one zero bit in the range, test for it directly.
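    // For example, if the valid bit positions are 0..4 and Mask = 0b11101, the
    // only uncovered position is 1 == countr_one(Mask), so test ShiftOp != 1.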
3289     Cmp = DAG.getSetCC(
3290         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3291         ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
3292   } else {
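    // General case: test whether the bit at position ShiftOp is set in the
    // mask, i.e. ((1 << ShiftOp) & Mask) != 0.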
3293     // Make desired shift
3294     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
3295                                     DAG.getConstant(1, dl, VT), ShiftOp);
3296 
3297     // Emit bit tests and jumps
3298     SDValue AndOp = DAG.getNode(ISD::AND, dl,
3299                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
3300     Cmp = DAG.getSetCC(
3301         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3302         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
3303   }
3304 
3305   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
3306   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
3307   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
3308   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3309   // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
3310   // one as they are relative probabilities (and thus work more like weights),
3311   // and hence we need to normalize them to let the sum of them become one.
3312   SwitchBB->normalizeSuccProbs();
3313 
3314   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
3315                               MVT::Other, getControlRoot(),
3316                               Cmp, DAG.getBasicBlock(B.TargetBB));
3317 
3318   // Avoid emitting unnecessary branches to the next block.
3319   if (NextMBB != NextBlock(SwitchBB))
3320     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3321                         DAG.getBasicBlock(NextMBB));
3322 
3323   DAG.setRoot(BrAnd);
3324 }
3325 
3326 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
3327   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
3328 
3329   // Retrieve successors. Look through artificial IR level blocks like
3330   // catchswitch for successors.
3331   MachineBasicBlock *Return = FuncInfo.getMBB(I.getSuccessor(0));
3332   const BasicBlock *EHPadBB = I.getSuccessor(1);
3333   MachineBasicBlock *EHPadMBB = FuncInfo.getMBB(EHPadBB);
3334 
3335   // Deopt and ptrauth bundles are lowered in helper functions, and we don't
3336   // have to do anything here to lower funclet bundles.
3337   assert(!I.hasOperandBundlesOtherThan(
3338              {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3339               LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3340               LLVMContext::OB_cfguardtarget, LLVMContext::OB_ptrauth,
3341               LLVMContext::OB_clang_arc_attachedcall}) &&
3342          "Cannot lower invokes with arbitrary operand bundles yet!");
3343 
3344   const Value *Callee(I.getCalledOperand());
3345   const Function *Fn = dyn_cast<Function>(Callee);
3346   if (isa<InlineAsm>(Callee))
3347     visitInlineAsm(I, EHPadBB);
3348   else if (Fn && Fn->isIntrinsic()) {
3349     switch (Fn->getIntrinsicID()) {
3350     default:
3351       llvm_unreachable("Cannot invoke this intrinsic");
3352     case Intrinsic::donothing:
3353       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
3354     case Intrinsic::seh_try_begin:
3355     case Intrinsic::seh_scope_begin:
3356     case Intrinsic::seh_try_end:
3357     case Intrinsic::seh_scope_end:
3358       if (EHPadMBB)
          // Mark the block as referenced by the EH table so that the dtor
          // funclet is not removed by optimizations.
3361           EHPadMBB->setMachineBlockAddressTaken();
3362       break;
3363     case Intrinsic::experimental_patchpoint_void:
3364     case Intrinsic::experimental_patchpoint:
3365       visitPatchpoint(I, EHPadBB);
3366       break;
3367     case Intrinsic::experimental_gc_statepoint:
3368       LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
3369       break;
3370     case Intrinsic::wasm_rethrow: {
3371       // This is usually done in visitTargetIntrinsic, but this intrinsic is
3372       // special because it can be invoked, so we manually lower it to a DAG
3373       // node here.
3374       SmallVector<SDValue, 8> Ops;
3375       Ops.push_back(getControlRoot()); // inchain for the terminator node
3376       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3377       Ops.push_back(
3378           DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
3379                                 TLI.getPointerTy(DAG.getDataLayout())));
3380       SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
3381       DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
3382       break;
3383     }
3384     }
3385   } else if (I.hasDeoptState()) {
3386     // Currently we do not lower any intrinsic calls with deopt operand bundles.
3387     // Eventually we will support lowering the @llvm.experimental.deoptimize
3388     // intrinsic, and right now there are no plans to support other intrinsics
3389     // with deopt state.
3390     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
3391   } else if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
3392     LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), EHPadBB);
3393   } else {
3394     LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
3395   }
3396 
3397   // If the value of the invoke is used outside of its defining block, make it
3398   // available as a virtual register.
  // We already took care of the exported value for the statepoint instruction
  // during the call to LowerStatepoint.
3401   if (!isa<GCStatepointInst>(I)) {
3402     CopyToExportRegsIfNeeded(&I);
3403   }
3404 
3405   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3406   BranchProbabilityInfo *BPI = FuncInfo.BPI;
3407   BranchProbability EHPadBBProb =
3408       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3409           : BranchProbability::getZero();
3410   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
3411 
3412   // Update successor info.
3413   addSuccessorWithProb(InvokeMBB, Return);
3414   for (auto &UnwindDest : UnwindDests) {
3415     UnwindDest.first->setIsEHPad();
3416     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3417   }
3418   InvokeMBB->normalizeSuccProbs();
3419 
3420   // Drop into normal successor.
3421   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
3422                           DAG.getBasicBlock(Return)));
3423 }
3424 
3425 void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
3426   MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
3427 
3428   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3429   // have to do anything here to lower funclet bundles.
3430   assert(!I.hasOperandBundlesOtherThan(
3431              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3432          "Cannot lower callbrs with arbitrary operand bundles yet!");
3433 
3434   assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
3435   visitInlineAsm(I);
3436   CopyToExportRegsIfNeeded(&I);
3437 
3438   // Retrieve successors.
3439   SmallPtrSet<BasicBlock *, 8> Dests;
3440   Dests.insert(I.getDefaultDest());
3441   MachineBasicBlock *Return = FuncInfo.getMBB(I.getDefaultDest());
3442 
3443   // Update successor info.
3444   addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
3445   for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
3446     BasicBlock *Dest = I.getIndirectDest(i);
3447     MachineBasicBlock *Target = FuncInfo.getMBB(Dest);
3448     Target->setIsInlineAsmBrIndirectTarget();
3449     Target->setMachineBlockAddressTaken();
3450     Target->setLabelMustBeEmitted();
3451     // Don't add duplicate machine successors.
3452     if (Dests.insert(Dest).second)
3453       addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
3454   }
3455   CallBrMBB->normalizeSuccProbs();
3456 
3457   // Drop into default successor.
3458   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
3459                           MVT::Other, getControlRoot(),
3460                           DAG.getBasicBlock(Return)));
3461 }
3462 
3463 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
3464   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
3465 }
3466 
3467 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3468   assert(FuncInfo.MBB->isEHPad() &&
3469          "Call to landingpad not in landing pad!");
3470 
3471   // If there aren't registers to copy the values into (e.g., during SjLj
3472   // exceptions), then don't bother to create these DAG nodes.
3473   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3474   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3475   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3476       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3477     return;
3478 
3479   // If landingpad's return type is token type, we don't create DAG nodes
3480   // for its exception pointer and selector value. The extraction of exception
3481   // pointer or selector value from token type landingpads is not currently
3482   // supported.
3483   if (LP.getType()->isTokenTy())
3484     return;
3485 
3486   SmallVector<EVT, 2> ValueVTs;
3487   SDLoc dl = getCurSDLoc();
3488   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3489   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3490 
3491   // Get the two live-in registers as SDValues. The physregs have already been
3492   // copied into virtual registers.
3493   SDValue Ops[2];
3494   if (FuncInfo.ExceptionPointerVirtReg) {
3495     Ops[0] = DAG.getZExtOrTrunc(
3496         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3497                            FuncInfo.ExceptionPointerVirtReg,
3498                            TLI.getPointerTy(DAG.getDataLayout())),
3499         dl, ValueVTs[0]);
3500   } else {
3501     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3502   }
3503   Ops[1] = DAG.getZExtOrTrunc(
3504       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3505                          FuncInfo.ExceptionSelectorVirtReg,
3506                          TLI.getPointerTy(DAG.getDataLayout())),
3507       dl, ValueVTs[1]);
3508 
3509   // Merge into one.
3510   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3511                             DAG.getVTList(ValueVTs), Ops);
3512   setValue(&LP, Res);
3513 }
3514 
3515 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
3516                                            MachineBasicBlock *Last) {
3517   // Update JTCases.
3518   for (JumpTableBlock &JTB : SL->JTCases)
3519     if (JTB.first.HeaderBB == First)
3520       JTB.first.HeaderBB = Last;
3521 
3522   // Update BitTestCases.
3523   for (BitTestBlock &BTB : SL->BitTestCases)
3524     if (BTB.Parent == First)
3525       BTB.Parent = Last;
3526 }
3527 
3528 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3529   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3530 
3531   // Update machine-CFG edges with unique successors.
3532   SmallSet<BasicBlock*, 32> Done;
3533   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3534     BasicBlock *BB = I.getSuccessor(i);
3535     bool Inserted = Done.insert(BB).second;
3536     if (!Inserted)
3537         continue;
3538 
3539     MachineBasicBlock *Succ = FuncInfo.getMBB(BB);
3540     addSuccessorWithProb(IndirectBrMBB, Succ);
3541   }
3542   IndirectBrMBB->normalizeSuccProbs();
3543 
3544   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
3545                           MVT::Other, getControlRoot(),
3546                           getValue(I.getAddress())));
3547 }
3548 
3549 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3550   if (!DAG.getTarget().Options.TrapUnreachable)
3551     return;
3552 
3553   // We may be able to ignore unreachable behind a noreturn call.
3554   if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode());
3555       Call && Call->doesNotReturn()) {
3556     if (DAG.getTarget().Options.NoTrapAfterNoreturn)
3557       return;
3558     // Do not emit an additional trap instruction.
3559     if (Call->isNonContinuableTrap())
3560       return;
3561   }
3562 
3563   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3564 }
3565 
3566 void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3567   SDNodeFlags Flags;
3568   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3569     Flags.copyFMF(*FPOp);
3570 
3571   SDValue Op = getValue(I.getOperand(0));
3572   SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3573                                     Op, Flags);
3574   setValue(&I, UnNodeValue);
3575 }
3576 
3577 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3578   SDNodeFlags Flags;
3579   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3580     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3581     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3582   }
3583   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3584     Flags.setExact(ExactOp->isExact());
3585   if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
3586     Flags.setDisjoint(DisjointOp->isDisjoint());
3587   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3588     Flags.copyFMF(*FPOp);
3589 
3590   SDValue Op1 = getValue(I.getOperand(0));
3591   SDValue Op2 = getValue(I.getOperand(1));
3592   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3593                                      Op1, Op2, Flags);
3594   setValue(&I, BinNodeValue);
3595 }
3596 
3597 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3598   SDValue Op1 = getValue(I.getOperand(0));
3599   SDValue Op2 = getValue(I.getOperand(1));
3600 
3601   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3602       Op1.getValueType(), DAG.getDataLayout());
3603 
3604   // Coerce the shift amount to the right type if we can. This exposes the
3605   // truncate or zext to optimization early.
3606   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3607     assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) &&
3608            "Unexpected shift type");
3609     Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
3610   }
3611 
3612   bool nuw = false;
3613   bool nsw = false;
3614   bool exact = false;
3615 
3616   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3617 
3618     if (const OverflowingBinaryOperator *OFBinOp =
3619             dyn_cast<const OverflowingBinaryOperator>(&I)) {
3620       nuw = OFBinOp->hasNoUnsignedWrap();
3621       nsw = OFBinOp->hasNoSignedWrap();
3622     }
3623     if (const PossiblyExactOperator *ExactOp =
3624             dyn_cast<const PossiblyExactOperator>(&I))
3625       exact = ExactOp->isExact();
3626   }
3627   SDNodeFlags Flags;
3628   Flags.setExact(exact);
3629   Flags.setNoSignedWrap(nsw);
3630   Flags.setNoUnsignedWrap(nuw);
3631   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3632                             Flags);
3633   setValue(&I, Res);
3634 }
3635 
3636 void SelectionDAGBuilder::visitSDiv(const User &I) {
3637   SDValue Op1 = getValue(I.getOperand(0));
3638   SDValue Op2 = getValue(I.getOperand(1));
3639 
3640   SDNodeFlags Flags;
3641   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3642                  cast<PossiblyExactOperator>(&I)->isExact());
3643   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3644                            Op2, Flags));
3645 }
3646 
3647 void SelectionDAGBuilder::visitICmp(const ICmpInst &I) {
3648   ICmpInst::Predicate predicate = I.getPredicate();
3649   SDValue Op1 = getValue(I.getOperand(0));
3650   SDValue Op2 = getValue(I.getOperand(1));
3651   ISD::CondCode Opcode = getICmpCondCode(predicate);
3652 
3653   auto &TLI = DAG.getTargetLoweringInfo();
3654   EVT MemVT =
3655       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3656 
3657   // If a pointer's DAG type is larger than its memory type then the DAG values
3658   // are zero-extended. This breaks signed comparisons so truncate back to the
3659   // underlying type before doing the compare.
3660   if (Op1.getValueType() != MemVT) {
3661     Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3662     Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3663   }
3664 
3665   SDNodeFlags Flags;
3666   Flags.setSameSign(I.hasSameSign());
3667   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3668 
3669   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3670                                                         I.getType());
3671   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3672 }
3673 
3674 void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
3675   FCmpInst::Predicate predicate = I.getPredicate();
3676   SDValue Op1 = getValue(I.getOperand(0));
3677   SDValue Op2 = getValue(I.getOperand(1));
3678 
3679   ISD::CondCode Condition = getFCmpCondCode(predicate);
3680   auto *FPMO = cast<FPMathOperator>(&I);
3681   if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3682     Condition = getFCmpCodeWithoutNaN(Condition);
3683 
3684   SDNodeFlags Flags;
3685   Flags.copyFMF(*FPMO);
3686   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3687 
3688   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3689                                                         I.getType());
3690   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3691 }
3692 
// Check if the condition of the select is used only by select instructions.
3695 static bool hasOnlySelectUsers(const Value *Cond) {
3696   return llvm::all_of(Cond->users(), [](const Value *V) {
3697     return isa<SelectInst>(V);
3698   });
3699 }
3700 
3701 void SelectionDAGBuilder::visitSelect(const User &I) {
3702   SmallVector<EVT, 4> ValueVTs;
3703   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3704                   ValueVTs);
3705   unsigned NumValues = ValueVTs.size();
3706   if (NumValues == 0) return;
3707 
3708   SmallVector<SDValue, 4> Values(NumValues);
3709   SDValue Cond     = getValue(I.getOperand(0));
3710   SDValue LHSVal   = getValue(I.getOperand(1));
3711   SDValue RHSVal   = getValue(I.getOperand(2));
3712   SmallVector<SDValue, 1> BaseOps(1, Cond);
3713   ISD::NodeType OpCode =
3714       Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3715 
3716   bool IsUnaryAbs = false;
3717   bool Negate = false;
3718 
3719   SDNodeFlags Flags;
3720   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3721     Flags.copyFMF(*FPOp);
3722 
3723   Flags.setUnpredictable(
3724       cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));
3725 
3726   // Min/max matching is only viable if all output VTs are the same.
3727   if (all_equal(ValueVTs)) {
3728     EVT VT = ValueVTs[0];
3729     LLVMContext &Ctx = *DAG.getContext();
3730     auto &TLI = DAG.getTargetLoweringInfo();
3731 
3732     // We care about the legality of the operation after it has been type
3733     // legalized.
3734     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3735       VT = TLI.getTypeToTransformTo(Ctx, VT);
3736 
3737     // If the vselect is legal, assume we want to leave this as a vector setcc +
3738     // vselect. Otherwise, if this is going to be scalarized, we want to see if
3739     // min/max is legal on the scalar type.
3740     bool UseScalarMinMax = VT.isVector() &&
3741       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3742 
3743     // ValueTracking's select pattern matching does not account for -0.0,
3744     // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that
3745     // -0.0 is less than +0.0.
3746     const Value *LHS, *RHS;
3747     auto SPR = matchSelectPattern(&I, LHS, RHS);
3748     ISD::NodeType Opc = ISD::DELETED_NODE;
3749     switch (SPR.Flavor) {
3750     case SPF_UMAX:    Opc = ISD::UMAX; break;
3751     case SPF_UMIN:    Opc = ISD::UMIN; break;
3752     case SPF_SMAX:    Opc = ISD::SMAX; break;
3753     case SPF_SMIN:    Opc = ISD::SMIN; break;
3754     case SPF_FMINNUM:
3755       switch (SPR.NaNBehavior) {
3756       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3757       case SPNB_RETURNS_NAN: break;
3758       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3759       case SPNB_RETURNS_ANY:
3760         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT) ||
3761             (UseScalarMinMax &&
3762              TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType())))
3763           Opc = ISD::FMINNUM;
3764         break;
3765       }
3766       break;
3767     case SPF_FMAXNUM:
3768       switch (SPR.NaNBehavior) {
3769       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3770       case SPNB_RETURNS_NAN: break;
3771       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3772       case SPNB_RETURNS_ANY:
3773         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT) ||
3774             (UseScalarMinMax &&
3775              TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType())))
3776           Opc = ISD::FMAXNUM;
3777         break;
3778       }
3779       break;
3780     case SPF_NABS:
3781       Negate = true;
3782       [[fallthrough]];
3783     case SPF_ABS:
3784       IsUnaryAbs = true;
3785       Opc = ISD::ABS;
3786       break;
3787     default: break;
3788     }
3789 
3790     if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3791         (TLI.isOperationLegalOrCustom(Opc, VT) ||
3792          (UseScalarMinMax &&
3793           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3794         // If the underlying comparison instruction is used by any other
3795         // instruction, the consumed instructions won't be destroyed, so it is
3796         // not profitable to convert to a min/max.
3797         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3798       OpCode = Opc;
3799       LHSVal = getValue(LHS);
3800       RHSVal = getValue(RHS);
3801       BaseOps.clear();
3802     }
3803 
3804     if (IsUnaryAbs) {
3805       OpCode = Opc;
3806       LHSVal = getValue(LHS);
3807       BaseOps.clear();
3808     }
3809   }
3810 
3811   if (IsUnaryAbs) {
3812     for (unsigned i = 0; i != NumValues; ++i) {
3813       SDLoc dl = getCurSDLoc();
3814       EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3815       Values[i] =
3816           DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3817       if (Negate)
3818         Values[i] = DAG.getNegative(Values[i], dl, VT);
3819     }
3820   } else {
3821     for (unsigned i = 0; i != NumValues; ++i) {
3822       SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3823       Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3824       Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3825       Values[i] = DAG.getNode(
3826           OpCode, getCurSDLoc(),
3827           LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3828     }
3829   }
3830 
3831   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3832                            DAG.getVTList(ValueVTs), Values));
3833 }
3834 
3835 void SelectionDAGBuilder::visitTrunc(const User &I) {
3836   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3837   SDValue N = getValue(I.getOperand(0));
3838   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3839                                                         I.getType());
3840   SDNodeFlags Flags;
3841   if (auto *Trunc = dyn_cast<TruncInst>(&I)) {
3842     Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3843     Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3844   }
3845 
3846   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N, Flags));
3847 }
3848 
3849 void SelectionDAGBuilder::visitZExt(const User &I) {
3850   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3851   // ZExt also can't be a cast to bool for the same reason. So, nothing much to do.
3852   SDValue N = getValue(I.getOperand(0));
3853   auto &TLI = DAG.getTargetLoweringInfo();
3854   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3855 
3856   SDNodeFlags Flags;
3857   if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3858     Flags.setNonNeg(PNI->hasNonNeg());
3859 
3860   // Eagerly use nonneg information to canonicalize towards sign_extend if
3861   // that is the target's preference.
3862   // TODO: Let the target do this later.
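       // For example, a "zext nneg i32 %x to i64" (illustrative IR) may be
       // emitted as ISD::SIGN_EXTEND here if the target reports sign extension
       // as the cheaper operation, as some 64-bit targets that keep 32-bit
       // values sign-extended in registers do.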
3863   if (Flags.hasNonNeg() &&
3864       TLI.isSExtCheaperThanZExt(N.getValueType(), DestVT)) {
3865     setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3866     return;
3867   }
3868 
3869   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N, Flags));
3870 }
3871 
3872 void SelectionDAGBuilder::visitSExt(const User &I) {
3873   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3874   // SExt also can't be a cast to bool for the same reason. So, nothing much to do.
3875   SDValue N = getValue(I.getOperand(0));
3876   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3877                                                         I.getType());
3878   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3879 }
3880 
3881 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3882   // FPTrunc is never a no-op cast, no need to check
3883   SDValue N = getValue(I.getOperand(0));
3884   SDLoc dl = getCurSDLoc();
3885   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3886   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3887   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3888                            DAG.getTargetConstant(
3889                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3890 }
3891 
3892 void SelectionDAGBuilder::visitFPExt(const User &I) {
3893   // FPExt is never a no-op cast, no need to check
3894   SDValue N = getValue(I.getOperand(0));
3895   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3896                                                         I.getType());
3897   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3898 }
3899 
3900 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3901   // FPToUI is never a no-op cast, no need to check
3902   SDValue N = getValue(I.getOperand(0));
3903   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3904                                                         I.getType());
3905   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3906 }
3907 
3908 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3909   // FPToSI is never a no-op cast, no need to check
3910   SDValue N = getValue(I.getOperand(0));
3911   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3912                                                         I.getType());
3913   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3914 }
3915 
3916 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3917   // UIToFP is never a no-op cast, no need to check
3918   SDValue N = getValue(I.getOperand(0));
3919   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3920                                                         I.getType());
3921   SDNodeFlags Flags;
3922   if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3923     Flags.setNonNeg(PNI->hasNonNeg());
3924 
3925   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N, Flags));
3926 }
3927 
3928 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3929   // SIToFP is never a no-op cast, no need to check
3930   SDValue N = getValue(I.getOperand(0));
3931   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3932                                                         I.getType());
3933   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3934 }
3935 
3936 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3937   // What to do depends on the size of the integer and the size of the pointer.
3938   // We can either truncate, zero extend, or no-op, accordingly.
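       // For example, converting a 32-bit pointer to i64 zero extends,
       // converting it to i16 truncates, and converting it to i32 is simply a
       // copy.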
3939   SDValue N = getValue(I.getOperand(0));
3940   auto &TLI = DAG.getTargetLoweringInfo();
3941   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3942                                                         I.getType());
3943   EVT PtrMemVT =
3944       TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3945   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3946   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3947   setValue(&I, N);
3948 }
3949 
3950 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3951   // What to do depends on the size of the integer and the size of the pointer.
3952   // We can either truncate, zero extend, or no-op, accordingly.
3953   SDValue N = getValue(I.getOperand(0));
3954   auto &TLI = DAG.getTargetLoweringInfo();
3955   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3956   EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3957   N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3958   N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3959   setValue(&I, N);
3960 }
3961 
3962 void SelectionDAGBuilder::visitBitCast(const User &I) {
3963   SDValue N = getValue(I.getOperand(0));
3964   SDLoc dl = getCurSDLoc();
3965   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3966                                                         I.getType());
3967 
3968   // BitCast assures us that source and destination are the same size so this is
3969   // either a BITCAST or a no-op.
3970   if (DestVT != N.getValueType())
3971     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3972                              DestVT, N)); // convert types.
3973   // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3974   // might fold any kind of constant expression to an integer constant and that
3975   // is not what we are looking for. Only recognize a bitcast of a genuine
3976   // constant integer as an opaque constant.
3977   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3978     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3979                                  /*isOpaque=*/true));
3980   else
3981     setValue(&I, N);            // noop cast.
3982 }
3983 
3984 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3985   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3986   const Value *SV = I.getOperand(0);
3987   SDValue N = getValue(SV);
3988   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3989 
3990   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3991   unsigned DestAS = I.getType()->getPointerAddressSpace();
3992 
3993   if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
3994     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3995 
3996   setValue(&I, N);
3997 }
3998 
3999 void SelectionDAGBuilder::visitInsertElement(const User &I) {
4000   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4001   SDValue InVec = getValue(I.getOperand(0));
4002   SDValue InVal = getValue(I.getOperand(1));
4003   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
4004                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
4005   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
4006                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
4007                            InVec, InVal, InIdx));
4008 }
4009 
4010 void SelectionDAGBuilder::visitExtractElement(const User &I) {
4011   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4012   SDValue InVec = getValue(I.getOperand(0));
4013   SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
4014                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
4015   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
4016                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
4017                            InVec, InIdx));
4018 }
4019 
4020 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
4021   SDValue Src1 = getValue(I.getOperand(0));
4022   SDValue Src2 = getValue(I.getOperand(1));
4023   ArrayRef<int> Mask;
4024   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
4025     Mask = SVI->getShuffleMask();
4026   else
4027     Mask = cast<ConstantExpr>(I).getShuffleMask();
4028   SDLoc DL = getCurSDLoc();
4029   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4030   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4031   EVT SrcVT = Src1.getValueType();
4032 
4033   if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
4034       VT.isScalableVector()) {
4035     // Canonical splat form of first element of first input vector.
4036     SDValue FirstElt =
4037         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
4038                     DAG.getVectorIdxConstant(0, DL));
4039     setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
4040     return;
4041   }
4042 
4043   // For now, we only handle splats for scalable vectors.
4044   // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
4045   // for targets that support a SPLAT_VECTOR for non-scalable vector types.
4046   assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
4047 
4048   unsigned SrcNumElts = SrcVT.getVectorNumElements();
4049   unsigned MaskNumElts = Mask.size();
4050 
4051   if (SrcNumElts == MaskNumElts) {
4052     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
4053     return;
4054   }
4055 
4056   // Normalize the shuffle vector since mask and vector length don't match.
4057   if (SrcNumElts < MaskNumElts) {
4058     // Mask is longer than the source vectors. We can use concatenate vector to
4059     // make the mask and vectors lengths match.
4060 
4061     if (MaskNumElts % SrcNumElts == 0) {
4062       // Mask length is a multiple of the source vector length.
4063       // Check if the shuffle is some kind of concatenation of the input
4064       // vectors.
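           // For example, with two <4 x i32> sources, the mask <0,1,2,3,4,5,6,7>
           // (or <4,5,6,7,0,1,2,3>) is just a concatenation of the inputs and
           // can be emitted as a single CONCAT_VECTORS node.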
4065       unsigned NumConcat = MaskNumElts / SrcNumElts;
4066       bool IsConcat = true;
4067       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4068       for (unsigned i = 0; i != MaskNumElts; ++i) {
4069         int Idx = Mask[i];
4070         if (Idx < 0)
4071           continue;
4072         // Ensure the indices in each SrcVT sized piece are sequential and that
4073         // the same source is used for the whole piece.
4074         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4075             (ConcatSrcs[i / SrcNumElts] >= 0 &&
4076              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
4077           IsConcat = false;
4078           break;
4079         }
4080         // Remember which source this index came from.
4081         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4082       }
4083 
4084       // The shuffle is concatenating multiple vectors together. Just emit
4085       // a CONCAT_VECTORS operation.
4086       if (IsConcat) {
4087         SmallVector<SDValue, 8> ConcatOps;
4088         for (auto Src : ConcatSrcs) {
4089           if (Src < 0)
4090             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
4091           else if (Src == 0)
4092             ConcatOps.push_back(Src1);
4093           else
4094             ConcatOps.push_back(Src2);
4095         }
4096         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
4097         return;
4098       }
4099     }
4100 
4101     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
4102     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4103     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
4104                                     PaddedMaskNumElts);
4105 
4106     // Pad both vectors with undefs to make them the same length as the mask.
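         // For example, shuffling two <2 x i32> sources with a 3-element mask
         // pads both sources out to <4 x i32> (PaddedMaskNumElts == 4),
         // shuffles at that width, and extracts the leading three elements
         // below.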
4107     SDValue UndefVal = DAG.getUNDEF(SrcVT);
4108 
4109     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
4110     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
4111     MOps1[0] = Src1;
4112     MOps2[0] = Src2;
4113 
4114     Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
4115     Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
4116 
4117     // Readjust mask for new input vector length.
4118     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4119     for (unsigned i = 0; i != MaskNumElts; ++i) {
4120       int Idx = Mask[i];
4121       if (Idx >= (int)SrcNumElts)
4122         Idx -= SrcNumElts - PaddedMaskNumElts;
4123       MappedOps[i] = Idx;
4124     }
4125 
4126     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
4127 
4128     // If the concatenated vector was padded, extract a subvector with the
4129     // correct number of elements.
4130     if (MaskNumElts != PaddedMaskNumElts)
4131       Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
4132                            DAG.getVectorIdxConstant(0, DL));
4133 
4134     setValue(&I, Result);
4135     return;
4136   }
4137 
4138   assert(SrcNumElts > MaskNumElts);
4139 
4140   // Analyze the access pattern of the vector to see if we can extract
4141   // two subvectors and do the shuffle.
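       // For example, a 4-element mask <8,9,10,11> applied to <16 x i8>
       // sources can be handled by extracting the <4 x i8> subvector that
       // starts at element 8 of the first source and shuffling within it.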
4142   int StartIdx[2] = {-1, -1}; // StartIdx to extract from
4143   bool CanExtract = true;
4144   for (int Idx : Mask) {
4145     unsigned Input = 0;
4146     if (Idx < 0)
4147       continue;
4148 
4149     if (Idx >= (int)SrcNumElts) {
4150       Input = 1;
4151       Idx -= SrcNumElts;
4152     }
4153 
4154     // If all the indices come from the same MaskNumElts sized portion of
4155     // the sources we can use extract. Also make sure the extract wouldn't
4156     // extract past the end of the source.
4157     int NewStartIdx = alignDown(Idx, MaskNumElts);
4158     if (NewStartIdx + MaskNumElts > SrcNumElts ||
4159         (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4160       CanExtract = false;
4161     // Make sure we always update StartIdx as we use it to track if all
4162     // elements are undef.
4163     StartIdx[Input] = NewStartIdx;
4164   }
4165 
4166   if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4167     setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
4168     return;
4169   }
4170   if (CanExtract) {
4171     // Extract appropriate subvector and generate a vector shuffle
4172     for (unsigned Input = 0; Input < 2; ++Input) {
4173       SDValue &Src = Input == 0 ? Src1 : Src2;
4174       if (StartIdx[Input] < 0)
4175         Src = DAG.getUNDEF(VT);
4176       else {
4177         Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
4178                           DAG.getVectorIdxConstant(StartIdx[Input], DL));
4179       }
4180     }
4181 
4182     // Calculate new mask.
4183     SmallVector<int, 8> MappedOps(Mask);
4184     for (int &Idx : MappedOps) {
4185       if (Idx >= (int)SrcNumElts)
4186         Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4187       else if (Idx >= 0)
4188         Idx -= StartIdx[0];
4189     }
4190 
4191     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
4192     return;
4193   }
4194 
4195   // We can't use either concat vectors or extract subvectors, so fall back to
4196   // replacing the shuffle with a series of extract and build vector
4197   // operations.
4198   EVT EltVT = VT.getVectorElementType();
4199   SmallVector<SDValue,8> Ops;
4200   for (int Idx : Mask) {
4201     SDValue Res;
4202 
4203     if (Idx < 0) {
4204       Res = DAG.getUNDEF(EltVT);
4205     } else {
4206       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4207       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
4208 
4209       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
4210                         DAG.getVectorIdxConstant(Idx, DL));
4211     }
4212 
4213     Ops.push_back(Res);
4214   }
4215 
4216   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
4217 }
4218 
4219 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
4220   ArrayRef<unsigned> Indices = I.getIndices();
4221   const Value *Op0 = I.getOperand(0);
4222   const Value *Op1 = I.getOperand(1);
4223   Type *AggTy = I.getType();
4224   Type *ValTy = Op1->getType();
4225   bool IntoUndef = isa<UndefValue>(Op0);
4226   bool FromUndef = isa<UndefValue>(Op1);
4227 
4228   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
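       // For example, for an aggregate of type {i32, {i32, i32}} and indices
       // {1, 0}, LinearIndex is 1: the inserted value starts at the second
       // scalar of the flattened aggregate.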
4229 
4230   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4231   SmallVector<EVT, 4> AggValueVTs;
4232   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
4233   SmallVector<EVT, 4> ValValueVTs;
4234   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4235 
4236   unsigned NumAggValues = AggValueVTs.size();
4237   unsigned NumValValues = ValValueVTs.size();
4238   SmallVector<SDValue, 4> Values(NumAggValues);
4239 
4240   // Ignore an insertvalue that produces an empty object
4241   if (!NumAggValues) {
4242     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4243     return;
4244   }
4245 
4246   SDValue Agg = getValue(Op0);
4247   unsigned i = 0;
4248   // Copy the beginning value(s) from the original aggregate.
4249   for (; i != LinearIndex; ++i)
4250     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4251                 SDValue(Agg.getNode(), Agg.getResNo() + i);
4252   // Copy values from the inserted value(s).
4253   if (NumValValues) {
4254     SDValue Val = getValue(Op1);
4255     for (; i != LinearIndex + NumValValues; ++i)
4256       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4257                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
4258   }
4259   // Copy remaining value(s) from the original aggregate.
4260   for (; i != NumAggValues; ++i)
4261     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4262                 SDValue(Agg.getNode(), Agg.getResNo() + i);
4263 
4264   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4265                            DAG.getVTList(AggValueVTs), Values));
4266 }
4267 
4268 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
4269   ArrayRef<unsigned> Indices = I.getIndices();
4270   const Value *Op0 = I.getOperand(0);
4271   Type *AggTy = Op0->getType();
4272   Type *ValTy = I.getType();
4273   bool OutOfUndef = isa<UndefValue>(Op0);
4274 
4275   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4276 
4277   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4278   SmallVector<EVT, 4> ValValueVTs;
4279   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4280 
4281   unsigned NumValValues = ValValueVTs.size();
4282 
4283   // Ignore an extractvalue that produces an empty object
4284   if (!NumValValues) {
4285     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4286     return;
4287   }
4288 
4289   SmallVector<SDValue, 4> Values(NumValValues);
4290 
4291   SDValue Agg = getValue(Op0);
4292   // Copy out the selected value(s).
4293   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4294     Values[i - LinearIndex] =
4295       OutOfUndef ?
4296         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
4297         SDValue(Agg.getNode(), Agg.getResNo() + i);
4298 
4299   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4300                            DAG.getVTList(ValValueVTs), Values));
4301 }
4302 
4303 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
4304   Value *Op0 = I.getOperand(0);
4305   // Note that the pointer operand may be a vector of pointers. Take the scalar
4306   // element which holds a pointer.
4307   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
4308   SDValue N = getValue(Op0);
4309   SDLoc dl = getCurSDLoc();
4310   auto &TLI = DAG.getTargetLoweringInfo();
4311   GEPNoWrapFlags NW = cast<GEPOperator>(I).getNoWrapFlags();
4312 
4313   // Normalize Vector GEP - all scalar operands should be converted to the
4314   // splat vector.
4315   bool IsVectorGEP = I.getType()->isVectorTy();
4316   ElementCount VectorElementCount =
4317       IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
4318                   : ElementCount::getFixed(0);
4319 
4320   if (IsVectorGEP && !N.getValueType().isVector()) {
4321     LLVMContext &Context = *DAG.getContext();
4322     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
4323     N = DAG.getSplat(VT, dl, N);
4324   }
4325 
4326   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
4327        GTI != E; ++GTI) {
4328     const Value *Idx = GTI.getOperand();
4329     if (StructType *StTy = GTI.getStructTypeOrNull()) {
4330       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
4331       if (Field) {
4332         // N = N + Offset
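             // For example, for "getelementptr %struct.S, ptr %p, i64 0, i32 2"
             // (illustrative types), this adds the constant byte offset of
             // field 2, as given by the DataLayout struct layout, to the
             // pointer value.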
4333         uint64_t Offset =
4334             DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
4335 
4336         // In an inbounds GEP with an offset that is nonnegative even when
4337         // interpreted as signed, assume there is no unsigned overflow.
4338         SDNodeFlags Flags;
4339         if (NW.hasNoUnsignedWrap() ||
4340             (int64_t(Offset) >= 0 && NW.hasNoUnsignedSignedWrap()))
4341           Flags |= SDNodeFlags::NoUnsignedWrap;
4342 
4343         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
4344                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
4345       }
4346     } else {
4347       // IdxSize is the width of the arithmetic according to IR semantics.
4348       // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4349       // (and fix up the result later).
4350       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4351       MVT IdxTy = MVT::getIntegerVT(IdxSize);
4352       TypeSize ElementSize =
4353           GTI.getSequentialElementStride(DAG.getDataLayout());
4354       // We intentionally mask away the high bits here; ElementSize may not
4355       // fit in IdxTy.
4356       APInt ElementMul(IdxSize, ElementSize.getKnownMinValue(),
4357                        /*isSigned=*/false, /*implicitTrunc=*/true);
4358       bool ElementScalable = ElementSize.isScalable();
4359 
4360       // If this is a scalar constant or a splat vector of constants,
4361       // handle it quickly.
4362       const auto *C = dyn_cast<Constant>(Idx);
4363       if (C && isa<VectorType>(C->getType()))
4364         C = C->getSplatValue();
4365 
4366       const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4367       if (CI && CI->isZero())
4368         continue;
4369       if (CI && !ElementScalable) {
4370         APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4371         LLVMContext &Context = *DAG.getContext();
4372         SDValue OffsVal;
4373         if (IsVectorGEP)
4374           OffsVal = DAG.getConstant(
4375               Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4376         else
4377           OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4378 
4379         // In an inbounds GEP with an offset that is nonnegative even when
4380         // interpreted as signed, assume there is no unsigned overflow.
4381         SDNodeFlags Flags;
4382         if (NW.hasNoUnsignedWrap() ||
4383             (Offs.isNonNegative() && NW.hasNoUnsignedSignedWrap()))
4384           Flags.setNoUnsignedWrap(true);
4385 
4386         OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4387 
4388         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
4389         continue;
4390       }
4391 
4392       // N = N + Idx * ElementMul;
4393       SDValue IdxN = getValue(Idx);
4394 
4395       if (!IdxN.getValueType().isVector() && IsVectorGEP) {
4396         EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(),
4397                                   VectorElementCount);
4398         IdxN = DAG.getSplat(VT, dl, IdxN);
4399       }
4400 
4401       // If the index is smaller or larger than intptr_t, truncate or extend
4402       // it.
4403       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4404 
4405       SDNodeFlags ScaleFlags;
4406       // The multiplication of an index by the type size does not wrap the
4407       // pointer index type in a signed sense (mul nsw).
4408       ScaleFlags.setNoSignedWrap(NW.hasNoUnsignedSignedWrap());
4409 
4410       // The multiplication of an index by the type size does not wrap the
4411       // pointer index type in an unsigned sense (mul nuw).
4412       ScaleFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap());
4413 
4414       if (ElementScalable) {
4415         EVT VScaleTy = N.getValueType().getScalarType();
4416         SDValue VScale = DAG.getNode(
4417             ISD::VSCALE, dl, VScaleTy,
4418             DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4419         if (IsVectorGEP)
4420           VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4421         IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale,
4422                            ScaleFlags);
4423       } else {
4424         // If this is a multiply by a power of two, turn it into a shl
4425         // immediately.  This is a very common case.
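             // For example, indexing an array of i64 elements (ElementMul == 8)
             // emits "IdxN << 3" instead of a multiply by 8.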
4426         if (ElementMul != 1) {
4427           if (ElementMul.isPowerOf2()) {
4428             unsigned Amt = ElementMul.logBase2();
4429             IdxN = DAG.getNode(ISD::SHL, dl, N.getValueType(), IdxN,
4430                                DAG.getConstant(Amt, dl, IdxN.getValueType()),
4431                                ScaleFlags);
4432           } else {
4433             SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
4434                                             IdxN.getValueType());
4435             IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, Scale,
4436                                ScaleFlags);
4437           }
4438         }
4439       }
4440 
4441       // The successive addition of the current address, truncated to the
4442       // pointer index type and interpreted as an unsigned number, and each
4443       // offset, also interpreted as an unsigned number, does not wrap the
4444       // pointer index type (add nuw).
4445       SDNodeFlags AddFlags;
4446       AddFlags.setNoUnsignedWrap(NW.hasNoUnsignedWrap());
4447 
4448       N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, IdxN, AddFlags);
4449     }
4450   }
4451 
4452   MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
4453   MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
4454   if (IsVectorGEP) {
4455     PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
4456     PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
4457   }
4458 
4459   if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4460     N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4461 
4462   setValue(&I, N);
4463 }
4464 
4465 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4466   // If this is a fixed sized alloca in the entry block of the function,
4467   // allocate it statically on the stack.
4468   if (FuncInfo.StaticAllocaMap.count(&I))
4469     return;   // getValue will auto-populate this.
4470 
4471   SDLoc dl = getCurSDLoc();
4472   Type *Ty = I.getAllocatedType();
4473   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4474   auto &DL = DAG.getDataLayout();
4475   TypeSize TySize = DL.getTypeAllocSize(Ty);
4476   MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
4477 
4478   SDValue AllocSize = getValue(I.getArraySize());
4479 
4480   EVT IntPtr = TLI.getPointerTy(DL, I.getAddressSpace());
4481   if (AllocSize.getValueType() != IntPtr)
4482     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4483 
4484   if (TySize.isScalable())
4485     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4486                             DAG.getVScale(dl, IntPtr,
4487                                           APInt(IntPtr.getScalarSizeInBits(),
4488                                                 TySize.getKnownMinValue())));
4489   else {
4490     SDValue TySizeValue =
4491         DAG.getConstant(TySize.getFixedValue(), dl, MVT::getIntegerVT(64));
4492     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4493                             DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4494   }
4495 
4496   // Handle alignment.  If the requested alignment is less than or equal to
4497   // the stack alignment, ignore it.  If the requested alignment is greater
4498   // than the stack alignment, we note it in the DYNAMIC_STACKALLOC node.
4499   Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4500   if (*Alignment <= StackAlign)
4501     Alignment = std::nullopt;
4502 
4503   const uint64_t StackAlignMask = StackAlign.value() - 1U;
4504   // Round the size of the allocation up to the stack alignment size
4505   // by adding SA-1 to the size. This doesn't overflow because we're computing
4506   // an address inside an alloca.
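       // For example, with a 16-byte stack alignment, a 20-byte allocation is
       // rounded up to (20 + 15) & ~15 == 32 bytes.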
4507   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4508                           DAG.getConstant(StackAlignMask, dl, IntPtr),
4509                           SDNodeFlags::NoUnsignedWrap);
4510 
4511   // Mask out the low bits for alignment purposes.
4512   AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4513                           DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4514 
4515   SDValue Ops[] = {
4516       getRoot(), AllocSize,
4517       DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4518   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4519   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4520   setValue(&I, DSA);
4521   DAG.setRoot(DSA.getValue(1));
4522 
4523   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4524 }
4525 
4526 static const MDNode *getRangeMetadata(const Instruction &I) {
4527   // If !noundef is not present, then !range violation results in a poison
4528   // value rather than immediate undefined behavior. In theory, transferring
4529   // these annotations to SDAG is fine, but in practice there are key SDAG
4530   // transforms that are known not to be poison-safe, such as folding logical
4531   // and/or to bitwise and/or. For now, only transfer !range if !noundef is
4532   // also present.
4533   if (!I.hasMetadata(LLVMContext::MD_noundef))
4534     return nullptr;
4535   return I.getMetadata(LLVMContext::MD_range);
4536 }
4537 
4538 static std::optional<ConstantRange> getRange(const Instruction &I) {
4539   if (const auto *CB = dyn_cast<CallBase>(&I)) {
4540     // see comment in getRangeMetadata about this check
4541     if (CB->hasRetAttr(Attribute::NoUndef))
4542       return CB->getRange();
4543   }
4544   if (const MDNode *Range = getRangeMetadata(I))
4545     return getConstantRangeFromMetadata(*Range);
4546   return std::nullopt;
4547 }
4548 
4549 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4550   if (I.isAtomic())
4551     return visitAtomicLoad(I);
4552 
4553   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4554   const Value *SV = I.getOperand(0);
4555   if (TLI.supportSwiftError()) {
4556     // Swifterror values can come from either a function parameter with
4557     // swifterror attribute or an alloca with swifterror attribute.
4558     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4559       if (Arg->hasSwiftErrorAttr())
4560         return visitLoadFromSwiftError(I);
4561     }
4562 
4563     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4564       if (Alloca->isSwiftError())
4565         return visitLoadFromSwiftError(I);
4566     }
4567   }
4568 
4569   SDValue Ptr = getValue(SV);
4570 
4571   Type *Ty = I.getType();
4572   SmallVector<EVT, 4> ValueVTs, MemVTs;
4573   SmallVector<TypeSize, 4> Offsets;
4574   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
4575   unsigned NumValues = ValueVTs.size();
4576   if (NumValues == 0)
4577     return;
4578 
4579   Align Alignment = I.getAlign();
4580   AAMDNodes AAInfo = I.getAAMetadata();
4581   const MDNode *Ranges = getRangeMetadata(I);
4582   bool isVolatile = I.isVolatile();
4583   MachineMemOperand::Flags MMOFlags =
4584       TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4585 
4586   SDValue Root;
4587   bool ConstantMemory = false;
4588   if (isVolatile)
4589     // Serialize volatile loads with other side effects.
4590     Root = getRoot();
4591   else if (NumValues > MaxParallelChains)
4592     Root = getMemoryRoot();
4593   else if (AA &&
4594            AA->pointsToConstantMemory(MemoryLocation(
4595                SV,
4596                LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4597                AAInfo))) {
4598     // Do not serialize (non-volatile) loads of constant memory with anything.
4599     Root = DAG.getEntryNode();
4600     ConstantMemory = true;
4601     MMOFlags |= MachineMemOperand::MOInvariant;
4602   } else {
4603     // Do not serialize non-volatile loads against each other.
4604     Root = DAG.getRoot();
4605   }
4606 
4607   SDLoc dl = getCurSDLoc();
4608 
4609   if (isVolatile)
4610     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4611 
4612   SmallVector<SDValue, 4> Values(NumValues);
4613   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4614 
4615   unsigned ChainI = 0;
4616   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4617     // Serializing loads here may result in excessive register pressure, and
4618     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4619     // could recover a bit by hoisting nodes upward in the chain by recognizing
4620     // they are side-effect free or do not alias. The optimizer should really
4621     // avoid this case by converting large object/array copies to llvm.memcpy
4622     // (MaxParallelChains should always remain as a failsafe).
4623     if (ChainI == MaxParallelChains) {
4624       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4625       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4626                                   ArrayRef(Chains.data(), ChainI));
4627       Root = Chain;
4628       ChainI = 0;
4629     }
4630 
4631     // TODO: MachinePointerInfo only supports a fixed length offset.
4632     MachinePointerInfo PtrInfo =
4633         !Offsets[i].isScalable() || Offsets[i].isZero()
4634             ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4635             : MachinePointerInfo();
4636 
4637     SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4638     SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment,
4639                             MMOFlags, AAInfo, Ranges);
4640     Chains[ChainI] = L.getValue(1);
4641 
4642     if (MemVTs[i] != ValueVTs[i])
4643       L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4644 
4645     Values[i] = L;
4646   }
4647 
4648   if (!ConstantMemory) {
4649     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4650                                 ArrayRef(Chains.data(), ChainI));
4651     if (isVolatile)
4652       DAG.setRoot(Chain);
4653     else
4654       PendingLoads.push_back(Chain);
4655   }
4656 
4657   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4658                            DAG.getVTList(ValueVTs), Values));
4659 }
4660 
4661 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4662   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4663          "call visitStoreToSwiftError when backend supports swifterror");
4664 
4665   SmallVector<EVT, 4> ValueVTs;
4666   SmallVector<uint64_t, 4> Offsets;
4667   const Value *SrcV = I.getOperand(0);
4668   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4669                   SrcV->getType(), ValueVTs, &Offsets, 0);
4670   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4671          "expect a single EVT for swifterror");
4672 
4673   SDValue Src = getValue(SrcV);
4674   // Create a virtual register, then update the virtual register.
4675   Register VReg =
4676       SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4677   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4678   // Chain can be getRoot or getControlRoot.
4679   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4680                                       SDValue(Src.getNode(), Src.getResNo()));
4681   DAG.setRoot(CopyNode);
4682 }
4683 
4684 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4685   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4686          "call visitLoadFromSwiftError when backend supports swifterror");
4687 
4688   assert(!I.isVolatile() &&
4689          !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4690          !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4691          "load_from_swift_error must not be volatile, nontemporal, or invariant");
4692 
4693   const Value *SV = I.getOperand(0);
4694   Type *Ty = I.getType();
4695   assert(
4696       (!AA ||
4697        !AA->pointsToConstantMemory(MemoryLocation(
4698            SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4699            I.getAAMetadata()))) &&
4700       "load_from_swift_error should not be constant memory");
4701 
4702   SmallVector<EVT, 4> ValueVTs;
4703   SmallVector<uint64_t, 4> Offsets;
4704   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4705                   ValueVTs, &Offsets, 0);
4706   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4707          "expect a single EVT for swifterror");
4708 
4709   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4710   SDValue L = DAG.getCopyFromReg(
4711       getRoot(), getCurSDLoc(),
4712       SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4713 
4714   setValue(&I, L);
4715 }
4716 
4717 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4718   if (I.isAtomic())
4719     return visitAtomicStore(I);
4720 
4721   const Value *SrcV = I.getOperand(0);
4722   const Value *PtrV = I.getOperand(1);
4723 
4724   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4725   if (TLI.supportSwiftError()) {
4726     // Swifterror values can come from either a function parameter with
4727     // swifterror attribute or an alloca with swifterror attribute.
4728     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4729       if (Arg->hasSwiftErrorAttr())
4730         return visitStoreToSwiftError(I);
4731     }
4732 
4733     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4734       if (Alloca->isSwiftError())
4735         return visitStoreToSwiftError(I);
4736     }
4737   }
4738 
4739   SmallVector<EVT, 4> ValueVTs, MemVTs;
4740   SmallVector<TypeSize, 4> Offsets;
4741   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4742                   SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4743   unsigned NumValues = ValueVTs.size();
4744   if (NumValues == 0)
4745     return;
4746 
4747   // Get the lowered operands. Note that we do this after
4748   // checking if NumValues is zero, because with zero values
4749   // the operands won't have entries in the map.
4750   SDValue Src = getValue(SrcV);
4751   SDValue Ptr = getValue(PtrV);
4752 
4753   SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4754   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4755   SDLoc dl = getCurSDLoc();
4756   Align Alignment = I.getAlign();
4757   AAMDNodes AAInfo = I.getAAMetadata();
4758 
4759   auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4760 
4761   unsigned ChainI = 0;
4762   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4763     // See visitLoad comments.
4764     if (ChainI == MaxParallelChains) {
4765       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4766                                   ArrayRef(Chains.data(), ChainI));
4767       Root = Chain;
4768       ChainI = 0;
4769     }
4770 
4771     // TODO: MachinePointerInfo only supports a fixed length offset.
4772     MachinePointerInfo PtrInfo =
4773         !Offsets[i].isScalable() || Offsets[i].isZero()
4774             ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4775             : MachinePointerInfo();
4776 
4777     SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4778     SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4779     if (MemVTs[i] != ValueVTs[i])
4780       Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4781     SDValue St =
4782         DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4783     Chains[ChainI] = St;
4784   }
4785 
4786   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4787                                   ArrayRef(Chains.data(), ChainI));
4788   setValue(&I, StoreNode);
4789   DAG.setRoot(StoreNode);
4790 }
4791 
4792 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4793                                            bool IsCompressing) {
4794   SDLoc sdl = getCurSDLoc();
4795 
4796   auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4797                                Align &Alignment) {
4798     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4799     Src0 = I.getArgOperand(0);
4800     Ptr = I.getArgOperand(1);
4801     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getAlignValue();
4802     Mask = I.getArgOperand(3);
4803   };
4804   auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4805                                     Align &Alignment) {
4806     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4807     Src0 = I.getArgOperand(0);
4808     Ptr = I.getArgOperand(1);
4809     Mask = I.getArgOperand(2);
4810     Alignment = I.getParamAlign(1).valueOrOne();
4811   };
4812 
4813   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4814   Align Alignment;
4815   if (IsCompressing)
4816     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4817   else
4818     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4819 
4820   SDValue Ptr = getValue(PtrOperand);
4821   SDValue Src0 = getValue(Src0Operand);
4822   SDValue Mask = getValue(MaskOperand);
4823   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4824 
4825   EVT VT = Src0.getValueType();
4826 
4827   auto MMOFlags = MachineMemOperand::MOStore;
4828   if (I.hasMetadata(LLVMContext::MD_nontemporal))
4829     MMOFlags |= MachineMemOperand::MONonTemporal;
4830 
4831   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4832       MachinePointerInfo(PtrOperand), MMOFlags,
4833       LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata());
4834 
4835   const auto &TLI = DAG.getTargetLoweringInfo();
4836   const auto &TTI =
4837       TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
4838   SDValue StoreNode =
4839       !IsCompressing &&
4840               TTI.hasConditionalLoadStoreForType(I.getArgOperand(0)->getType())
4841           ? TLI.visitMaskedStore(DAG, sdl, getMemoryRoot(), MMO, Ptr, Src0,
4842                                  Mask)
4843           : DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask,
4844                                VT, MMO, ISD::UNINDEXED, /*Truncating=*/false,
4845                                IsCompressing);
4846   DAG.setRoot(StoreNode);
4847   setValue(&I, StoreNode);
4848 }
4849 
4850 // Get a uniform base for the Gather/Scatter intrinsic.
4851 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4852 // We try to represent it as a base pointer + vector of indices.
4853 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4854 // The first operand of the GEP may be a single pointer or a vector of pointers
4855 // Example:
4856 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4857 //  or
4858 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
4859 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4860 //
4861 // When the first GEP operand is a single pointer, it is the uniform base we
4862 // are looking for. If the first operand of the GEP is a splat vector, we
4863 // extract the splat value and use it as a uniform base.
4864 // In all other cases the function returns 'false'.
4865 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4866                            ISD::MemIndexType &IndexType, SDValue &Scale,
4867                            SelectionDAGBuilder *SDB, const BasicBlock *CurBB,
4868                            uint64_t ElemSize) {
4869   SelectionDAG& DAG = SDB->DAG;
4870   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4871   const DataLayout &DL = DAG.getDataLayout();
4872 
4873   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4874 
4875   // Handle splat constant pointer.
4876   if (auto *C = dyn_cast<Constant>(Ptr)) {
4877     C = C->getSplatValue();
4878     if (!C)
4879       return false;
4880 
4881     Base = SDB->getValue(C);
4882 
4883     ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4884     EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4885     Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4886     IndexType = ISD::SIGNED_SCALED;
4887     Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4888     return true;
4889   }
4890 
4891   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4892   if (!GEP || GEP->getParent() != CurBB)
4893     return false;
4894 
4895   if (GEP->getNumOperands() != 2)
4896     return false;
4897 
4898   const Value *BasePtr = GEP->getPointerOperand();
4899   const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4900 
4901   // Make sure the base is scalar and the index is a vector.
4902   if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4903     return false;
4904 
4905   TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
4906   if (ScaleVal.isScalable())
4907     return false;
4908 
4909   // Target may not support the required addressing mode.
4910   if (ScaleVal != 1 &&
4911       !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize))
4912     return false;
4913 
4914   Base = SDB->getValue(BasePtr);
4915   Index = SDB->getValue(IndexVal);
4916   IndexType = ISD::SIGNED_SCALED;
4917 
4918   Scale =
4919       DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4920   return true;
4921 }
4922 
4923 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4924   SDLoc sdl = getCurSDLoc();
4925 
4926   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4927   const Value *Ptr = I.getArgOperand(1);
4928   SDValue Src0 = getValue(I.getArgOperand(0));
4929   SDValue Mask = getValue(I.getArgOperand(3));
4930   EVT VT = Src0.getValueType();
4931   Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4932                         ->getMaybeAlignValue()
4933                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
4934   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4935 
4936   SDValue Base;
4937   SDValue Index;
4938   ISD::MemIndexType IndexType;
4939   SDValue Scale;
4940   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4941                                     I.getParent(), VT.getScalarStoreSize());
4942 
4943   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4944   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4945       MachinePointerInfo(AS), MachineMemOperand::MOStore,
4946       LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata());
4947   if (!UniformBase) {
4948     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4949     Index = getValue(Ptr);
4950     IndexType = ISD::SIGNED_SCALED;
4951     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4952   }
4953 
4954   EVT IdxVT = Index.getValueType();
4955   EVT EltTy = IdxVT.getVectorElementType();
4956   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4957     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4958     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4959   }
4960 
4961   SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4962   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4963                                          Ops, MMO, IndexType, false);
4964   DAG.setRoot(Scatter);
4965   setValue(&I, Scatter);
4966 }
4967 
4968 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4969   SDLoc sdl = getCurSDLoc();
4970 
4971   auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4972                               Align &Alignment) {
4973     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4974     Ptr = I.getArgOperand(0);
4975     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getAlignValue();
4976     Mask = I.getArgOperand(2);
4977     Src0 = I.getArgOperand(3);
4978   };
4979   auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4980                                  Align &Alignment) {
4981     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4982     Ptr = I.getArgOperand(0);
4983     Alignment = I.getParamAlign(0).valueOrOne();
4984     Mask = I.getArgOperand(1);
4985     Src0 = I.getArgOperand(2);
4986   };
4987 
4988   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4989   Align Alignment;
4990   if (IsExpanding)
4991     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4992   else
4993     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4994 
4995   SDValue Ptr = getValue(PtrOperand);
4996   SDValue Src0 = getValue(Src0Operand);
4997   SDValue Mask = getValue(MaskOperand);
4998   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4999 
5000   EVT VT = Src0.getValueType();
5001   AAMDNodes AAInfo = I.getAAMetadata();
5002   const MDNode *Ranges = getRangeMetadata(I);
5003 
5004   // Do not serialize masked loads of constant memory with anything.
5005   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
5006   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
5007 
5008   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
5009 
5010   auto MMOFlags = MachineMemOperand::MOLoad;
5011   if (I.hasMetadata(LLVMContext::MD_nontemporal))
5012     MMOFlags |= MachineMemOperand::MONonTemporal;
5013 
5014   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5015       MachinePointerInfo(PtrOperand), MMOFlags,
5016       LocationSize::beforeOrAfterPointer(), Alignment, AAInfo, Ranges);
5017 
5018   const auto &TLI = DAG.getTargetLoweringInfo();
5019   const auto &TTI =
5020       TLI.getTargetMachine().getTargetTransformInfo(*I.getFunction());
5021   // The Load/Res may point to different values and both of them are output
5022   // variables.
5023   SDValue Load;
5024   SDValue Res;
5025   if (!IsExpanding &&
5026       TTI.hasConditionalLoadStoreForType(Src0Operand->getType()))
5027     Res = TLI.visitMaskedLoad(DAG, sdl, InChain, MMO, Load, Ptr, Src0, Mask);
5028   else
5029     Res = Load =
5030         DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
5031                           ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
5032   if (AddToChain)
5033     PendingLoads.push_back(Load.getValue(1));
5034   setValue(&I, Res);
5035 }
5036 
5037 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
5038   SDLoc sdl = getCurSDLoc();
5039 
5040   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
5041   const Value *Ptr = I.getArgOperand(0);
5042   SDValue Src0 = getValue(I.getArgOperand(3));
5043   SDValue Mask = getValue(I.getArgOperand(2));
5044 
5045   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5046   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5047   Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
5048                         ->getMaybeAlignValue()
5049                         .value_or(DAG.getEVTAlign(VT.getScalarType()));
5050 
5051   const MDNode *Ranges = getRangeMetadata(I);
5052 
5053   SDValue Root = DAG.getRoot();
5054   SDValue Base;
5055   SDValue Index;
5056   ISD::MemIndexType IndexType;
5057   SDValue Scale;
5058   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
5059                                     I.getParent(), VT.getScalarStoreSize());
5060   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
5061   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5062       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
5063       LocationSize::beforeOrAfterPointer(), Alignment, I.getAAMetadata(),
5064       Ranges);
5065 
5066   if (!UniformBase) {
5067     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5068     Index = getValue(Ptr);
5069     IndexType = ISD::SIGNED_SCALED;
5070     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
5071   }
5072 
5073   EVT IdxVT = Index.getValueType();
5074   EVT EltTy = IdxVT.getVectorElementType();
5075   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
5076     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
5077     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
5078   }
5079 
5080   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
5081   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
5082                                        Ops, MMO, IndexType, ISD::NON_EXTLOAD);
5083 
5084   PendingLoads.push_back(Gather.getValue(1));
5085   setValue(&I, Gather);
5086 }
5087 
5088 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
5089   SDLoc dl = getCurSDLoc();
5090   AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
5091   AtomicOrdering FailureOrdering = I.getFailureOrdering();
5092   SyncScope::ID SSID = I.getSyncScopeID();
5093 
5094   SDValue InChain = getRoot();
5095 
5096   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
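       // The cmpxchg node produces three results: the loaded value, an i1
       // success flag, and the output chain.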
5097   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5098 
5099   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5100   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
5101 
5102   MachineFunction &MF = DAG.getMachineFunction();
5103   MachineMemOperand *MMO = MF.getMachineMemOperand(
5104       MachinePointerInfo(I.getPointerOperand()), Flags,
5105       LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT),
5106       AAMDNodes(), nullptr, SSID, SuccessOrdering, FailureOrdering);
5107 
5108   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
5109                                    dl, MemVT, VTs, InChain,
5110                                    getValue(I.getPointerOperand()),
5111                                    getValue(I.getCompareOperand()),
5112                                    getValue(I.getNewValOperand()), MMO);
5113 
5114   SDValue OutChain = L.getValue(2);
5115 
5116   setValue(&I, L);
5117   DAG.setRoot(OutChain);
5118 }
5119 
5120 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
5121   SDLoc dl = getCurSDLoc();
5122   ISD::NodeType NT;
5123   switch (I.getOperation()) {
5124   default: llvm_unreachable("Unknown atomicrmw operation");
5125   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
5126   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
5127   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
5128   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
5129   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
5130   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
5131   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
5132   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
5133   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
5134   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
5135   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
5136   case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
5137   case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
5138   case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break;
5139   case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break;
5140   case AtomicRMWInst::UIncWrap:
5141     NT = ISD::ATOMIC_LOAD_UINC_WRAP;
5142     break;
5143   case AtomicRMWInst::UDecWrap:
5144     NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
5145     break;
5146   case AtomicRMWInst::USubCond:
5147     NT = ISD::ATOMIC_LOAD_USUB_COND;
5148     break;
5149   case AtomicRMWInst::USubSat:
5150     NT = ISD::ATOMIC_LOAD_USUB_SAT;
5151     break;
5152   }
5153   AtomicOrdering Ordering = I.getOrdering();
5154   SyncScope::ID SSID = I.getSyncScopeID();
5155 
5156   SDValue InChain = getRoot();
5157 
5158   auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
5159   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5160   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
5161 
5162   MachineFunction &MF = DAG.getMachineFunction();
5163   MachineMemOperand *MMO = MF.getMachineMemOperand(
5164       MachinePointerInfo(I.getPointerOperand()), Flags,
5165       LocationSize::precise(MemVT.getStoreSize()), DAG.getEVTAlign(MemVT),
5166       AAMDNodes(), nullptr, SSID, Ordering);
5167 
5168   SDValue L =
5169     DAG.getAtomic(NT, dl, MemVT, InChain,
5170                   getValue(I.getPointerOperand()), getValue(I.getValOperand()),
5171                   MMO);
5172 
5173   SDValue OutChain = L.getValue(1);
5174 
5175   setValue(&I, L);
5176   DAG.setRoot(OutChain);
5177 }
5178 
5179 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
5180   SDLoc dl = getCurSDLoc();
5181   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5182   SDValue Ops[3];
5183   Ops[0] = getRoot();
5184   Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
5185                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
5186   Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
5187                                  TLI.getFenceOperandTy(DAG.getDataLayout()));
5188   SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
5189   setValue(&I, N);
5190   DAG.setRoot(N);
5191 }
5192 
5193 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
5194   SDLoc dl = getCurSDLoc();
5195   AtomicOrdering Order = I.getOrdering();
5196   SyncScope::ID SSID = I.getSyncScopeID();
5197 
5198   SDValue InChain = getRoot();
5199 
5200   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5201   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5202   EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
5203 
5204   if (!TLI.supportsUnalignedAtomics() &&
5205       I.getAlign().value() < MemVT.getSizeInBits() / 8)
5206     report_fatal_error("Cannot generate unaligned atomic load");
5207 
5208   auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
5209 
5210   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
5211       MachinePointerInfo(I.getPointerOperand()), Flags,
5212       LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(),
5213       nullptr, SSID, Order);
5214 
5215   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
5216 
5217   SDValue Ptr = getValue(I.getPointerOperand());
5218   SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
5219                             Ptr, MMO);
5220 
5221   SDValue OutChain = L.getValue(1);
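       // If the in-memory type differs from the in-register type, convert the
       // loaded value back.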
5222   if (MemVT != VT)
5223     L = DAG.getPtrExtOrTrunc(L, dl, VT);
5224 
5225   setValue(&I, L);
5226   DAG.setRoot(OutChain);
5227 }
5228 
5229 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
5230   SDLoc dl = getCurSDLoc();
5231 
5232   AtomicOrdering Ordering = I.getOrdering();
5233   SyncScope::ID SSID = I.getSyncScopeID();
5234 
5235   SDValue InChain = getRoot();
5236 
5237   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5238   EVT MemVT =
5239       TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
5240 
5241   if (!TLI.supportsUnalignedAtomics() &&
5242       I.getAlign().value() < MemVT.getSizeInBits() / 8)
5243     report_fatal_error("Cannot generate unaligned atomic store");
5244 
5245   auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
5246 
5247   MachineFunction &MF = DAG.getMachineFunction();
5248   MachineMemOperand *MMO = MF.getMachineMemOperand(
5249       MachinePointerInfo(I.getPointerOperand()), Flags,
5250       LocationSize::precise(MemVT.getStoreSize()), I.getAlign(), AAMDNodes(),
5251       nullptr, SSID, Ordering);
5252 
5253   SDValue Val = getValue(I.getValueOperand());
5254   if (Val.getValueType() != MemVT)
5255     Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5256   SDValue Ptr = getValue(I.getPointerOperand());
5257 
5258   SDValue OutChain =
5259       DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
5260 
5261   setValue(&I, OutChain);
5262   DAG.setRoot(OutChain);
5263 }
5264 
5265 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
5266 /// node.
5267 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
5268                                                unsigned Intrinsic) {
5269   // Ignore the callsite's attributes. A specific call site may be marked with
5270   // readnone, but the lowering code will expect the chain based on the
5271   // definition.
5272   const Function *F = I.getCalledFunction();
5273   bool HasChain = !F->doesNotAccessMemory();
5274   bool OnlyLoad =
5275       HasChain && F->onlyReadsMemory() && F->willReturn() && F->doesNotThrow();
5276 
5277   // Build the operand list.
5278   SmallVector<SDValue, 8> Ops;
5279   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
5280     if (OnlyLoad) {
5281       // We don't need to serialize loads against other loads.
5282       Ops.push_back(DAG.getRoot());
5283     } else {
5284       Ops.push_back(getRoot());
5285     }
5286   }
5287 
5288   // Info is set by getTgtMemIntrinsic
5289   TargetLowering::IntrinsicInfo Info;
5290   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5291   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
5292                                                DAG.getMachineFunction(),
5293                                                Intrinsic);
5294 
5295   // Add the intrinsic ID as an integer operand if the node uses a generic INTRINSIC_* opcode.
5296   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
5297       Info.opc == ISD::INTRINSIC_W_CHAIN)
5298     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
5299                                         TLI.getPointerTy(DAG.getDataLayout())));
5300 
5301   // Add all operands of the call to the operand list.
5302   for (unsigned i = 0, e = I.arg_size(); i != e; ++i) {
5303     const Value *Arg = I.getArgOperand(i);
5304     if (!I.paramHasAttr(i, Attribute::ImmArg)) {
5305       Ops.push_back(getValue(Arg));
5306       continue;
5307     }
5308 
5309     // Use TargetConstant instead of a regular constant for immarg.
5310     EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
5311     if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5312       assert(CI->getBitWidth() <= 64 &&
5313              "large intrinsic immediates not handled");
5314       Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
5315     } else {
5316       Ops.push_back(
5317           DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
5318     }
5319   }
5320 
5321   SmallVector<EVT, 4> ValueVTs;
5322   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
5323 
5324   if (HasChain)
5325     ValueVTs.push_back(MVT::Other);
5326 
5327   SDVTList VTs = DAG.getVTList(ValueVTs);
5328 
5329   // Propagate fast-math-flags from IR to node(s).
5330   SDNodeFlags Flags;
5331   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
5332     Flags.copyFMF(*FPMO);
5333   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
5334 
5335   // Create the node.
5336   SDValue Result;
5337 
5338   if (auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl)) {
5339     auto *Token = Bundle->Inputs[0].get();
5340     SDValue ConvControlToken = getValue(Token);
5341     assert(Ops.back().getValueType() != MVT::Glue &&
5342            "Did not expected another glue node here.");
5343     ConvControlToken =
5344         DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
5345     Ops.push_back(ConvControlToken);
5346   }
5347 
5348   // In some cases, custom collection of operands from CallInst I may be needed.
5349   TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
5350   if (IsTgtIntrinsic) {
5351     // This is a target intrinsic that touches memory.
5352     //
5353     // TODO: We currently just fall back to address space 0 if getTgtMemIntrinsic
5354     //       didn't yield anything useful.
5355     MachinePointerInfo MPI;
5356     if (Info.ptrVal)
5357       MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
5358     else if (Info.fallbackAddressSpace)
5359       MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
5360     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops,
5361                                      Info.memVT, MPI, Info.align, Info.flags,
5362                                      Info.size, I.getAAMetadata());
5363   } else if (!HasChain) {
5364     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
5365   } else if (!I.getType()->isVoidTy()) {
5366     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
5367   } else {
5368     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
5369   }
5370 
5371   if (HasChain) {
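         // The chain is always the last result produced by the node.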
5372     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
5373     if (OnlyLoad)
5374       PendingLoads.push_back(Chain);
5375     else
5376       DAG.setRoot(Chain);
5377   }
5378 
5379   if (!I.getType()->isVoidTy()) {
5380     if (!isa<VectorType>(I.getType()))
5381       Result = lowerRangeToAssertZExt(DAG, I, Result);
5382 
5383     MaybeAlign Alignment = I.getRetAlign();
5384 
5385     // Insert `assertalign` node if there's an alignment.
5386     if (InsertAssertAlign && Alignment) {
5387       Result =
5388           DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
5389     }
5390   }
5391 
5392   setValue(&I, Result);
5393 }
5394 
5395 /// GetSignificand - Get the significand and build it into a floating-point
5396 /// number with exponent of 1:
5397 ///
5398 ///   Op = (Op & 0x007fffff) | 0x3f800000;
5399 ///
5400 /// where Op is the i32 bit pattern of the floating-point value.
5401 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
5402   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5403                            DAG.getConstant(0x007fffff, dl, MVT::i32));
5404   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
5405                            DAG.getConstant(0x3f800000, dl, MVT::i32));
5406   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
5407 }
5408 
5409 /// GetExponent - Get the exponent:
5410 ///
5411 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
5412 ///
5413 /// where Op is the i32 bit pattern of the floating-point value.
5414 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
5415                            const TargetLowering &TLI, const SDLoc &dl) {
5416   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5417                            DAG.getConstant(0x7f800000, dl, MVT::i32));
5418   SDValue t1 = DAG.getNode(
5419       ISD::SRL, dl, MVT::i32, t0,
5420       DAG.getConstant(23, dl,
5421                       TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
5422   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
5423                            DAG.getConstant(127, dl, MVT::i32));
5424   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
5425 }
5426 
5427 /// getF32Constant - Get 32-bit floating point constant.
5428 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
5429                               const SDLoc &dl) {
5430   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
5431                            MVT::f32);
5432 }
5433 
5434 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
5435                                        SelectionDAG &DAG) {
5436   // TODO: What fast-math-flags should be set on the floating-point nodes?
5437 
5438   //   IntegerPartOfX = (int32_t)t0;
5439   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
5440 
5441   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
5442   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
5443   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
5444 
5445   //   IntegerPartOfX <<= 23;
5446   IntegerPartOfX =
5447       DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
5448                   DAG.getConstant(23, dl,
5449                                   DAG.getTargetLoweringInfo().getShiftAmountTy(
5450                                       MVT::i32, DAG.getDataLayout())));
5451 
5452   SDValue TwoToFractionalPartOfX;
5453   if (LimitFloatPrecision <= 6) {
5454     // For floating-point precision of 6:
5455     //
5456     //   TwoToFractionalPartOfX =
5457     //     0.997535578f +
5458     //       (0.735607626f + 0.252464424f * x) * x;
5459     //
5460     // error 0.0144103317, which is 6 bits
5461     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5462                              getF32Constant(DAG, 0x3e814304, dl));
5463     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5464                              getF32Constant(DAG, 0x3f3c50c8, dl));
5465     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5466     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5467                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
5468   } else if (LimitFloatPrecision <= 12) {
5469     // For floating-point precision of 12:
5470     //
5471     //   TwoToFractionalPartOfX =
5472     //     0.999892986f +
5473     //       (0.696457318f +
5474     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
5475     //
5476     // error 0.000107046256, which is 13 to 14 bits
5477     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5478                              getF32Constant(DAG, 0x3da235e3, dl));
5479     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5480                              getF32Constant(DAG, 0x3e65b8f3, dl));
5481     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5482     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5483                              getF32Constant(DAG, 0x3f324b07, dl));
5484     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5485     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5486                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
5487   } else { // LimitFloatPrecision <= 18
5488     // For floating-point precision of 18:
5489     //
5490     //   TwoToFractionalPartOfX =
5491     //     0.999999982f +
5492     //       (0.693148872f +
5493     //         (0.240227044f +
5494     //           (0.554906021e-1f +
5495     //             (0.961591928e-2f +
5496     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5497     // error 2.47208000*10^(-7), which is better than 18 bits
5498     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5499                              getF32Constant(DAG, 0x3924b03e, dl));
5500     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5501                              getF32Constant(DAG, 0x3ab24b87, dl));
5502     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5503     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5504                              getF32Constant(DAG, 0x3c1d8c17, dl));
5505     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5506     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5507                              getF32Constant(DAG, 0x3d634a1d, dl));
5508     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5509     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5510                              getF32Constant(DAG, 0x3e75fe14, dl));
5511     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5512     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5513                               getF32Constant(DAG, 0x3f317234, dl));
5514     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5515     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
5516                                          getF32Constant(DAG, 0x3f800000, dl));
5517   }
5518 
5519   // Add the exponent into the result in integer domain.
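       // TwoToFractionalPartOfX is in [1, 2), so adding IntegerPartOfX into the
       // exponent field of its bit pattern multiplies it by 2^IntegerPartOfX.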
5520   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5521   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5522                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5523 }
5524 
5525 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
5526 /// limited-precision mode.
5527 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5528                          const TargetLowering &TLI, SDNodeFlags Flags) {
5529   if (Op.getValueType() == MVT::f32 &&
5530       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5531 
5532     // Put the exponent in the right bit position for later addition to the
5533     // final result:
5534     //
5535     // t0 = Op * log2(e)
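         //
         // since exp(Op) = 2^(Op * log2(e)) = 2^t0.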
5536 
5537     // TODO: What fast-math-flags should be set here?
5538     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5539                              DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5540     return getLimitedPrecisionExp2(t0, dl, DAG);
5541   }
5542 
5543   // No special expansion.
5544   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
5545 }
5546 
5547 /// expandLog - Lower a log intrinsic. Handles the special sequences for
5548 /// limited-precision mode.
5549 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5550                          const TargetLowering &TLI, SDNodeFlags Flags) {
5551   // TODO: What fast-math-flags should be set on the floating-point nodes?
5552 
5553   if (Op.getValueType() == MVT::f32 &&
5554       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5555     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5556 
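         // Decompose Op = 2^Exp * Mantissa with Mantissa in [1, 2), so that
         // log(Op) = Exp * log(2) + log(Mantissa).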
5557     // Scale the exponent by log(2).
5558     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5559     SDValue LogOfExponent =
5560         DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5561                     DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
5562 
5563     // Get the significand and build it into a floating-point number with
5564     // exponent of 1.
5565     SDValue X = GetSignificand(DAG, Op1, dl);
5566 
5567     SDValue LogOfMantissa;
5568     if (LimitFloatPrecision <= 6) {
5569       // For floating-point precision of 6:
5570       //
5571       //   LogOfMantissa =
5572       //     -1.1609546f +
5573       //       (1.4034025f - 0.23903021f * x) * x;
5574       //
5575       // error 0.0034276066, which is better than 8 bits
5576       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5577                                getF32Constant(DAG, 0xbe74c456, dl));
5578       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5579                                getF32Constant(DAG, 0x3fb3a2b1, dl));
5580       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5581       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5582                                   getF32Constant(DAG, 0x3f949a29, dl));
5583     } else if (LimitFloatPrecision <= 12) {
5584       // For floating-point precision of 12:
5585       //
5586       //   LogOfMantissa =
5587       //     -1.7417939f +
5588       //       (2.8212026f +
5589       //         (-1.4699568f +
5590       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5591       //
5592       // error 0.000061011436, which is 14 bits
5593       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5594                                getF32Constant(DAG, 0xbd67b6d6, dl));
5595       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5596                                getF32Constant(DAG, 0x3ee4f4b8, dl));
5597       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5598       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5599                                getF32Constant(DAG, 0x3fbc278b, dl));
5600       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5601       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5602                                getF32Constant(DAG, 0x40348e95, dl));
5603       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5604       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5605                                   getF32Constant(DAG, 0x3fdef31a, dl));
5606     } else { // LimitFloatPrecision <= 18
5607       // For floating-point precision of 18:
5608       //
5609       //   LogOfMantissa =
5610       //     -2.1072184f +
5611       //       (4.2372794f +
5612       //         (-3.7029485f +
5613       //           (2.2781945f +
5614       //             (-0.87823314f +
5615       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5616       //
5617       // error 0.0000023660568, which is better than 18 bits
5618       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5619                                getF32Constant(DAG, 0xbc91e5ac, dl));
5620       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5621                                getF32Constant(DAG, 0x3e4350aa, dl));
5622       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5623       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5624                                getF32Constant(DAG, 0x3f60d3e3, dl));
5625       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5626       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5627                                getF32Constant(DAG, 0x4011cdf0, dl));
5628       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5629       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5630                                getF32Constant(DAG, 0x406cfd1c, dl));
5631       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5632       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5633                                getF32Constant(DAG, 0x408797cb, dl));
5634       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5635       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5636                                   getF32Constant(DAG, 0x4006dcab, dl));
5637     }
5638 
5639     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5640   }
5641 
5642   // No special expansion.
5643   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
5644 }
5645 
5646 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5647 /// limited-precision mode.
5648 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5649                           const TargetLowering &TLI, SDNodeFlags Flags) {
5650   // TODO: What fast-math-flags should be set on the floating-point nodes?
5651 
5652   if (Op.getValueType() == MVT::f32 &&
5653       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5654     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5655 
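         // With Op = 2^Exp * Mantissa and Mantissa in [1, 2),
         // log2(Op) = Exp + log2(Mantissa).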
5656     // Get the exponent.
5657     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5658 
5659     // Get the significand and build it into a floating-point number with
5660     // exponent of 1.
5661     SDValue X = GetSignificand(DAG, Op1, dl);
5662 
5663     // Different possible minimax approximations of significand in
5664     // floating-point for various degrees of accuracy over [1,2].
5665     SDValue Log2ofMantissa;
5666     if (LimitFloatPrecision <= 6) {
5667       // For floating-point precision of 6:
5668       //
5669       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5670       //
5671       // error 0.0049451742, which is more than 7 bits
5672       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5673                                getF32Constant(DAG, 0xbeb08fe0, dl));
5674       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5675                                getF32Constant(DAG, 0x40019463, dl));
5676       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5677       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5678                                    getF32Constant(DAG, 0x3fd6633d, dl));
5679     } else if (LimitFloatPrecision <= 12) {
5680       // For floating-point precision of 12:
5681       //
5682       //   Log2ofMantissa =
5683       //     -2.51285454f +
5684       //       (4.07009056f +
5685       //         (-2.12067489f +
5686       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5687       //
5688       // error 0.0000876136000, which is better than 13 bits
5689       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5690                                getF32Constant(DAG, 0xbda7262e, dl));
5691       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5692                                getF32Constant(DAG, 0x3f25280b, dl));
5693       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5694       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5695                                getF32Constant(DAG, 0x4007b923, dl));
5696       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5697       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5698                                getF32Constant(DAG, 0x40823e2f, dl));
5699       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5700       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5701                                    getF32Constant(DAG, 0x4020d29c, dl));
5702     } else { // LimitFloatPrecision <= 18
5703       // For floating-point precision of 18:
5704       //
5705       //   Log2ofMantissa =
5706       //     -3.0400495f +
5707       //       (6.1129976f +
5708       //         (-5.3420409f +
5709       //           (3.2865683f +
5710       //             (-1.2669343f +
5711       //               (0.27515199f -
5712       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5713       //
5714       // error 0.0000018516, which is better than 18 bits
5715       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5716                                getF32Constant(DAG, 0xbcd2769e, dl));
5717       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5718                                getF32Constant(DAG, 0x3e8ce0b9, dl));
5719       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5720       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5721                                getF32Constant(DAG, 0x3fa22ae7, dl));
5722       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5723       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5724                                getF32Constant(DAG, 0x40525723, dl));
5725       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5726       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5727                                getF32Constant(DAG, 0x40aaf200, dl));
5728       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5729       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5730                                getF32Constant(DAG, 0x40c39dad, dl));
5731       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5732       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5733                                    getF32Constant(DAG, 0x4042902c, dl));
5734     }
5735 
5736     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5737   }
5738 
5739   // No special expansion.
5740   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5741 }
5742 
5743 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5744 /// limited-precision mode.
5745 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5746                            const TargetLowering &TLI, SDNodeFlags Flags) {
5747   // TODO: What fast-math-flags should be set on the floating-point nodes?
5748 
5749   if (Op.getValueType() == MVT::f32 &&
5750       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5751     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5752 
5753     // Scale the exponent by log10(2) [0.30102999f].
5754     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5755     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5756                                         getF32Constant(DAG, 0x3e9a209a, dl));
5757 
5758     // Get the significand and build it into a floating-point number with
5759     // exponent of 1.
5760     SDValue X = GetSignificand(DAG, Op1, dl);
5761 
5762     SDValue Log10ofMantissa;
5763     if (LimitFloatPrecision <= 6) {
5764       // For floating-point precision of 6:
5765       //
5766       //   Log10ofMantissa =
5767       //     -0.50419619f +
5768       //       (0.60948995f - 0.10380950f * x) * x;
5769       //
5770       // error 0.0014886165, which is 6 bits
5771       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5772                                getF32Constant(DAG, 0xbdd49a13, dl));
5773       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5774                                getF32Constant(DAG, 0x3f1c0789, dl));
5775       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5776       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5777                                     getF32Constant(DAG, 0x3f011300, dl));
5778     } else if (LimitFloatPrecision <= 12) {
5779       // For floating-point precision of 12:
5780       //
5781       //   Log10ofMantissa =
5782       //     -0.64831180f +
5783       //       (0.91751397f +
5784       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5785       //
5786       // error 0.00019228036, which is better than 12 bits
5787       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5788                                getF32Constant(DAG, 0x3d431f31, dl));
5789       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5790                                getF32Constant(DAG, 0x3ea21fb2, dl));
5791       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5792       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5793                                getF32Constant(DAG, 0x3f6ae232, dl));
5794       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5795       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5796                                     getF32Constant(DAG, 0x3f25f7c3, dl));
5797     } else { // LimitFloatPrecision <= 18
5798       // For floating-point precision of 18:
5799       //
5800       //   Log10ofMantissa =
5801       //     -0.84299375f +
5802       //       (1.5327582f +
5803       //         (-1.0688956f +
5804       //           (0.49102474f +
5805       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5806       //
5807       // error 0.0000037995730, which is better than 18 bits
5808       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5809                                getF32Constant(DAG, 0x3c5d51ce, dl));
5810       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5811                                getF32Constant(DAG, 0x3e00685a, dl));
5812       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5813       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5814                                getF32Constant(DAG, 0x3efb6798, dl));
5815       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5816       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5817                                getF32Constant(DAG, 0x3f88d192, dl));
5818       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5819       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5820                                getF32Constant(DAG, 0x3fc4316c, dl));
5821       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5822       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5823                                     getF32Constant(DAG, 0x3f57ce70, dl));
5824     }
5825 
5826     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5827   }
5828 
5829   // No special expansion.
5830   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5831 }
5832 
5833 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5834 /// limited-precision mode.
5835 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5836                           const TargetLowering &TLI, SDNodeFlags Flags) {
5837   if (Op.getValueType() == MVT::f32 &&
5838       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5839     return getLimitedPrecisionExp2(Op, dl, DAG);
5840 
5841   // No special expansion.
5842   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5843 }
5844 
5845 /// expandPow - Lower a pow intrinsic. Handles the special sequence for
5846 /// limited-precision mode when the base is exactly 10.0f.
5847 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5848                          SelectionDAG &DAG, const TargetLowering &TLI,
5849                          SDNodeFlags Flags) {
5850   bool IsExp10 = false;
5851   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5852       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5853     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5854       APFloat Ten(10.0f);
5855       IsExp10 = LHSC->isExactlyValue(Ten);
5856     }
5857   }
5858 
5859   // TODO: What fast-math-flags should be set on the FMUL node?
5860   if (IsExp10) {
5861     // Put the exponent in the right bit position for later addition to the
5862     // final result:
5863     //
5864     //   #define LOG2OF10 3.3219281f
5865     //   t0 = RHS * LOG2OF10;
5866     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5867                              getF32Constant(DAG, 0x40549a78, dl));
5868     return getLimitedPrecisionExp2(t0, dl, DAG);
5869   }
5870 
5871   // No special expansion.
5872   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5873 }
5874 
5875 /// ExpandPowI - Expand a llvm.powi intrinsic.
5876 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5877                           SelectionDAG &DAG) {
5878   // If RHS is a constant, we can expand this out to a multiplication tree if
5879   // it's beneficial on the target, otherwise we end up lowering to a call to
5880   // __powidf2 (for example).
5881   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5882     unsigned Val = RHSC->getSExtValue();
5883 
5884     // powi(x, 0) -> 1.0
5885     if (Val == 0)
5886       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5887 
5888     if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI(
5889             Val, DAG.shouldOptForSize())) {
5890       // Get the exponent as a positive value.
5891       if ((int)Val < 0)
5892         Val = -Val;
5893       // We use the simple binary decomposition method to generate the multiply
5894       // sequence.  There are more optimal ways to do this (for example,
5895       // powi(x,15) generates one more multiply than it should), but this has
5896       // the benefit of being both really simple and much better than a libcall.
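           // For example, powi(x, 11): 11 = 0b1011, so the loop below computes
           // Res = x * x^2 * x^8.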
5897       SDValue Res; // Logically starts equal to 1.0
5898       SDValue CurSquare = LHS;
5899       // TODO: Intrinsics should have fast-math-flags that propagate to these
5900       // nodes.
5901       while (Val) {
5902         if (Val & 1) {
5903           if (Res.getNode())
5904             Res =
5905                 DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5906           else
5907             Res = CurSquare; // 1.0*CurSquare.
5908         }
5909 
5910         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5911                                 CurSquare, CurSquare);
5912         Val >>= 1;
5913       }
5914 
5915       // If the original was negative, invert the result, producing 1/(x*x*x).
5916       if (RHSC->getSExtValue() < 0)
5917         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5918                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5919       return Res;
5920     }
5921   }
5922 
5923   // Otherwise, expand to a libcall.
5924   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5925 }
5926 
5927 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5928                             SDValue LHS, SDValue RHS, SDValue Scale,
5929                             SelectionDAG &DAG, const TargetLowering &TLI) {
5930   EVT VT = LHS.getValueType();
5931   bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5932   bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5933   LLVMContext &Ctx = *DAG.getContext();
5934 
5935   // If the type is legal but the operation isn't, this node might survive all
5936   // the way to operation legalization. If we end up there and we do not have
5937   // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5938   // node.
5939 
5940   // Coax the legalizer into expanding the node during type legalization instead
5941   // by bumping the size by one bit. This will force it to Promote, enabling the
5942   // early expansion and avoiding the need to expand later.
5943 
5944   // We don't have to do this if Scale is 0; that can always be expanded, unless
5945   // it's a saturating signed operation. Those can experience true integer
5946   // division overflow, a case which we must avoid.
5947 
5948   // FIXME: We wouldn't have to do this (or any of the early
5949   // expansion/promotion) if it was possible to expand a libcall of an
5950   // illegal type during operation legalization. But it's not, so things
5951   // get a bit hacky.
5952   unsigned ScaleInt = Scale->getAsZExtVal();
5953   if ((ScaleInt > 0 || (Saturating && Signed)) &&
5954       (TLI.isTypeLegal(VT) ||
5955        (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5956     TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5957         Opcode, VT, ScaleInt);
5958     if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5959       EVT PromVT;
5960       if (VT.isScalarInteger())
5961         PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5962       else if (VT.isVector()) {
5963         PromVT = VT.getVectorElementType();
5964         PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5965         PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5966       } else
5967         llvm_unreachable("Wrong VT for DIVFIX?");
5968       LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
5969       RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
5970       EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5971       // For saturating operations, we need to shift up the LHS to get the
5972       // proper saturation width, and then shift down again afterwards.
5973       if (Saturating)
5974         LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5975                           DAG.getConstant(1, DL, ShiftTy));
5976       SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5977       if (Saturating)
5978         Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5979                           DAG.getConstant(1, DL, ShiftTy));
5980       return DAG.getZExtOrTrunc(Res, DL, VT);
5981     }
5982   }
5983 
5984   return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5985 }
5986 
5987 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5988 // bitcasted, or split argument. Returns a list of <Register, size in bits>
5989 static void
5990 getUnderlyingArgRegs(SmallVectorImpl<std::pair<Register, TypeSize>> &Regs,
5991                      const SDValue &N) {
5992   switch (N.getOpcode()) {
5993   case ISD::CopyFromReg: {
5994     SDValue Op = N.getOperand(1);
5995     Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5996                       Op.getValueType().getSizeInBits());
5997     return;
5998   }
5999   case ISD::BITCAST:
6000   case ISD::AssertZext:
6001   case ISD::AssertSext:
6002   case ISD::TRUNCATE:
6003     getUnderlyingArgRegs(Regs, N.getOperand(0));
6004     return;
6005   case ISD::BUILD_PAIR:
6006   case ISD::BUILD_VECTOR:
6007   case ISD::CONCAT_VECTORS:
6008     for (SDValue Op : N->op_values())
6009       getUnderlyingArgRegs(Regs, Op);
6010     return;
6011   default:
6012     return;
6013   }
6014 }
6015 
6016 /// If the DbgValueInst is a dbg_value of a function argument, create the
6017 /// corresponding DBG_VALUE machine instruction for it now.  At the end of
6018 /// instruction selection, they will be inserted into the entry BB.
6019 /// We don't currently support this for variadic dbg_values, as they shouldn't
6020 /// appear for function arguments or in the prologue.
6021 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6022     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
6023     DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
6024   const Argument *Arg = dyn_cast<Argument>(V);
6025   if (!Arg)
6026     return false;
6027 
6028   MachineFunction &MF = DAG.getMachineFunction();
6029   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6030 
6031   // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
6032   // we've been asked to pursue.
6033   auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
6034                               bool Indirect) {
6035     if (Reg.isVirtual() && MF.useDebugInstrRef()) {
6036       // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
6037       // pointing at the VReg, which will be patched up later.
6038       auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
6039       SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
6040           /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
6041           /* isKill */ false, /* isDead */ false,
6042           /* isUndef */ false, /* isEarlyClobber */ false,
6043           /* SubReg */ 0, /* isDebug */ true)});
6044 
6045       auto *NewDIExpr = FragExpr;
6046       // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
6047       // the DIExpression.
6048       if (Indirect)
6049         NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
6050       SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
6051       NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
6052       return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
6053     } else {
6054       // Create a completely standard DBG_VALUE.
6055       auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
6056       return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
6057     }
6058   };
6059 
6060   if (Kind == FuncArgumentDbgValueKind::Value) {
6061     // ArgDbgValues are hoisted to the beginning of the entry block. So we
6062     // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
6063     // the entry block.
6064     bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
6065     if (!IsInEntryBlock)
6066       return false;
6067 
6068     // ArgDbgValues are hoisted to the beginning of the entry block.  So we
6069     // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
6070     // variable that also is a param.
6071     //
6072     // Although, if we are at the top of the entry block already, we can still
6073     // emit using ArgDbgValue. This might catch some situations when the
6074     // dbg.value refers to an argument that isn't used in the entry block, so
6075     // any CopyToReg node would be optimized out and the only way to express
6076     // this DBG_VALUE is by using the physical reg (or FI) as done in this
6077     // method.  ArgDbgValues are hoisted to the beginning of the entry block. So
6078     // we should only emit as ArgDbgValue if the Variable is an argument to the
6079     // current function, and the dbg.value intrinsic is found in the entry
6080     // block.
6081     bool VariableIsFunctionInputArg = Variable->isParameter() &&
6082         !DL->getInlinedAt();
6083     bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
6084     if (!IsInPrologue && !VariableIsFunctionInputArg)
6085       return false;
6086 
6087     // Here we assume that a function argument at the IR level can only be used
6088     // to describe one input parameter at the source level. If we for example have
6089     // source code like this
6090     //
6091     //    struct A { long x, y; };
6092     //    void foo(struct A a, long b) {
6093     //      ...
6094     //      b = a.x;
6095     //      ...
6096     //    }
6097     //
6098     // and IR like this
6099     //
6100     //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
6101     //  entry:
6102     //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
6103     //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
6104     //    call void @llvm.dbg.value(metadata i32 %b, "b",
6105     //    ...
6106     //    call void @llvm.dbg.value(metadata i32 %a1, "b"
6107     //    ...
6108     //
6109     // then the last dbg.value is describing a parameter "b" using a value that
6110     // is an argument. But since we have already used %a1 to describe a parameter
6111     // we should not handle that last dbg.value here (that would result in an
6112     // incorrect hoisting of the DBG_VALUE to the function entry).
6113     // Notice that we allow one dbg.value per IR level argument, to accommodate
6114     // for the situation with fragments above.
6115     // If there is no node for the value being handled, we return true to skip
6116     // the normal generation of debug info, as it would kill existing debug
6117     // info for the parameter in case of duplicates.
6118     if (VariableIsFunctionInputArg) {
6119       unsigned ArgNo = Arg->getArgNo();
6120       if (ArgNo >= FuncInfo.DescribedArgs.size())
6121         FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
6122       else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
6123         return !NodeMap[V].getNode();
6124       FuncInfo.DescribedArgs.set(ArgNo);
6125     }
6126   }
6127 
6128   bool IsIndirect = false;
6129   std::optional<MachineOperand> Op;
6130   // Some arguments' frame index is recorded during argument lowering.
6131   int FI = FuncInfo.getArgumentFrameIndex(Arg);
6132   if (FI != std::numeric_limits<int>::max())
6133     Op = MachineOperand::CreateFI(FI);
6134 
6135   SmallVector<std::pair<Register, TypeSize>, 8> ArgRegsAndSizes;
6136   if (!Op && N.getNode()) {
6137     getUnderlyingArgRegs(ArgRegsAndSizes, N);
6138     Register Reg;
6139     if (ArgRegsAndSizes.size() == 1)
6140       Reg = ArgRegsAndSizes.front().first;
6141 
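         // If the virtual register is simply a copy of an incoming live-in
         // physical register, describe that physical register instead.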
6142     if (Reg && Reg.isVirtual()) {
6143       MachineRegisterInfo &RegInfo = MF.getRegInfo();
6144       Register PR = RegInfo.getLiveInPhysReg(Reg);
6145       if (PR)
6146         Reg = PR;
6147     }
6148     if (Reg) {
6149       Op = MachineOperand::CreateReg(Reg, false);
6150       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
6151     }
6152   }
6153 
6154   if (!Op && N.getNode()) {
6155     // Check if frame index is available.
6156     SDValue LCandidate = peekThroughBitcasts(N);
6157     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
6158       if (FrameIndexSDNode *FINode =
6159           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
6160         Op = MachineOperand::CreateFI(FINode->getIndex());
6161   }
6162 
6163   if (!Op) {
6164     // Create a DBG_VALUE for each of the registers the value was split into.
6165     auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<Register, TypeSize>>
6166                                          SplitRegs) {
6167       unsigned Offset = 0;
6168       for (const auto &RegAndSize : SplitRegs) {
6169         // If the expression is already a fragment, the current register
6170         // offset+size might extend beyond the fragment. In this case, only
6171         // the register bits that are inside the fragment are relevant.
6172         int RegFragmentSizeInBits = RegAndSize.second;
6173         if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
6174           uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6175           // The register is entirely outside the expression fragment,
6176           // so is irrelevant for debug info.
6177           if (Offset >= ExprFragmentSizeInBits)
6178             break;
6179           // The register is partially outside the expression fragment, only
6180           // the low bits within the fragment are relevant for debug info.
6181           if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6182             RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
6183           }
6184         }
6185 
6186         auto FragmentExpr = DIExpression::createFragmentExpression(
6187             Expr, Offset, RegFragmentSizeInBits);
6188         Offset += RegAndSize.second;
6189         // If a valid fragment expression cannot be created, the variable's
6190         // correct value cannot be determined and so it is set as Undef.
6191         if (!FragmentExpr) {
6192           SDDbgValue *SDV = DAG.getConstantDbgValue(
6193               Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
6194           DAG.AddDbgValue(SDV, false);
6195           continue;
6196         }
6197         MachineInstr *NewMI =
6198             MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6199                              Kind != FuncArgumentDbgValueKind::Value);
6200         FuncInfo.ArgDbgValues.push_back(NewMI);
6201       }
6202     };
6203 
6204     // Check if ValueMap has reg number.
6205     DenseMap<const Value *, Register>::const_iterator
6206       VMI = FuncInfo.ValueMap.find(V);
6207     if (VMI != FuncInfo.ValueMap.end()) {
6208       const auto &TLI = DAG.getTargetLoweringInfo();
6209       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
6210                        V->getType(), std::nullopt);
6211       if (RFV.occupiesMultipleRegs()) {
6212         splitMultiRegDbgValue(RFV.getRegsAndSizes());
6213         return true;
6214       }
6215 
6216       Op = MachineOperand::CreateReg(VMI->second, false);
6217       IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
6218     } else if (ArgRegsAndSizes.size() > 1) {
6219       // This was split due to the calling convention, and no virtual register
6220       // mapping exists for the value.
6221       splitMultiRegDbgValue(ArgRegsAndSizes);
6222       return true;
6223     }
6224   }
6225 
6226   if (!Op)
6227     return false;
6228 
6229   assert(Variable->isValidLocationForIntrinsic(DL) &&
6230          "Expected inlined-at fields to agree");
6231   MachineInstr *NewMI = nullptr;
6232 
6233   if (Op->isReg())
6234     NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
6235   else
6236     NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
6237                     Variable, Expr);
6238 
6239   // Queue the instruction; it will be inserted into the entry block later.
6240   FuncInfo.ArgDbgValues.push_back(NewMI);
6241   return true;
6242 }
6243 
6244 /// Return the appropriate SDDbgValue based on N.
6245 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
6246                                              DILocalVariable *Variable,
6247                                              DIExpression *Expr,
6248                                              const DebugLoc &dl,
6249                                              unsigned DbgSDNodeOrder) {
6250   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
6251     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
6252     // stack slot locations.
6253     //
6254     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
6255     // debug values here after optimization:
6256     //
6257     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
6258     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
6259     //
6260     // Both describe the direct values of their associated variables.
6261     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6262                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
6263   }
6264   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
6265                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
6266 }
6267 
6268 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
6269   switch (Intrinsic) {
6270   case Intrinsic::smul_fix:
6271     return ISD::SMULFIX;
6272   case Intrinsic::umul_fix:
6273     return ISD::UMULFIX;
6274   case Intrinsic::smul_fix_sat:
6275     return ISD::SMULFIXSAT;
6276   case Intrinsic::umul_fix_sat:
6277     return ISD::UMULFIXSAT;
6278   case Intrinsic::sdiv_fix:
6279     return ISD::SDIVFIX;
6280   case Intrinsic::udiv_fix:
6281     return ISD::UDIVFIX;
6282   case Intrinsic::sdiv_fix_sat:
6283     return ISD::SDIVFIXSAT;
6284   case Intrinsic::udiv_fix_sat:
6285     return ISD::UDIVFIXSAT;
6286   default:
6287     llvm_unreachable("Unhandled fixed point intrinsic");
6288   }
6289 }
6290 
6291 void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
6292                                            const char *FunctionName) {
6293   assert(FunctionName && "FunctionName must not be nullptr");
6294   SDValue Callee = DAG.getExternalSymbol(
6295       FunctionName,
6296       DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
6297   LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
6298 }
6299 
6300 /// Given a @llvm.call.preallocated.setup, return the corresponding
6301 /// preallocated call.
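     /// For illustration (IR sketch; names are placeholders), given
     ///   %t = call token @llvm.call.preallocated.setup(i32 1)
     ///   %a = call ptr @llvm.call.preallocated.arg(token %t, i32 0)
     ///            preallocated(i32)
     ///   call void @f(ptr preallocated(i32) %a) ["preallocated"(token %t)]
     /// the user of %t that is not a call_preallocated_arg (the call to @f)
     /// is the preallocated call returned here.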
6302 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
6303   assert(cast<CallBase>(PreallocatedSetup)
6304                  ->getCalledFunction()
6305                  ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
6306          "expected call_preallocated_setup Value");
6307   for (const auto *U : PreallocatedSetup->users()) {
6308     auto *UseCall = cast<CallBase>(U);
6309     const Function *Fn = UseCall->getCalledFunction();
6310     if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6311       return UseCall;
6312     }
6313   }
6314   llvm_unreachable("expected corresponding call to preallocated setup/arg");
6315 }
6316 
6317 /// If DI is a debug value with an EntryValue expression, lower it using the
6318 /// corresponding physical register of the associated Argument value
6319 /// (guaranteed to exist by the verifier).
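     /// A rough sketch of the input this handles (illustrative IR; variable
     /// and value names are placeholders):
     ///   dbg.value(ptr %async_ctx, !"ctx",
     ///             !DIExpression(DW_OP_LLVM_entry_value, 1))
     /// where %async_ctx is a swiftasync Argument; the variable is then
     /// described in terms of the physical register that Argument arrived in.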
6320 bool SelectionDAGBuilder::visitEntryValueDbgValue(
6321     ArrayRef<const Value *> Values, DILocalVariable *Variable,
6322     DIExpression *Expr, DebugLoc DbgLoc) {
6323   if (!Expr->isEntryValue() || !hasSingleElement(Values))
6324     return false;
6325 
6326   // These properties are guaranteed by the verifier.
6327   const Argument *Arg = cast<Argument>(Values[0]);
6328   assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
6329 
6330   auto ArgIt = FuncInfo.ValueMap.find(Arg);
6331   if (ArgIt == FuncInfo.ValueMap.end()) {
6332     LLVM_DEBUG(
6333         dbgs() << "Dropping dbg.value: expression is entry_value but "
6334                   "couldn't find an associated register for the Argument\n");
6335     return true;
6336   }
6337   Register ArgVReg = ArgIt->getSecond();
6338 
6339   for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
6340     if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6341       SDDbgValue *SDV = DAG.getVRegDbgValue(
6342           Variable, Expr, PhysReg, false /*IsIndirect*/, DbgLoc, SDNodeOrder);
6343       DAG.AddDbgValue(SDV, false /*treat as dbg.declare byval parameter*/);
6344       return true;
6345     }
6346   LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
6347                        "couldn't find a physical register\n");
6348   return true;
6349 }
6350 
6351 /// Lower a call to one of the experimental convergence control intrinsics.
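     /// For example (illustrative IR), the loop intrinsic consumes the token
     /// defined by an anchor or entry intrinsic through a "convergencectrl"
     /// operand bundle:
     ///   %anchor = call token @llvm.experimental.convergence.anchor()
     ///   %loop = call token @llvm.experimental.convergence.loop()
     ///               [ "convergencectrl"(token %anchor) ]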
6352 void SelectionDAGBuilder::visitConvergenceControl(const CallInst &I,
6353                                                   unsigned Intrinsic) {
6354   SDLoc sdl = getCurSDLoc();
6355   switch (Intrinsic) {
6356   case Intrinsic::experimental_convergence_anchor:
6357     setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
6358     break;
6359   case Intrinsic::experimental_convergence_entry:
6360     setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
6361     break;
6362   case Intrinsic::experimental_convergence_loop: {
6363     auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl);
6364     auto *Token = Bundle->Inputs[0].get();
6365     setValue(&I, DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
6366                              getValue(Token)));
6367     break;
6368   }
6369   }
6370 }
6371 
6372 void SelectionDAGBuilder::visitVectorHistogram(const CallInst &I,
6373                                                unsigned IntrinsicID) {
6374   // For now, we're only lowering an 'add' histogram.
6375   // We can add others later, e.g. saturating adds, min/max.
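       // A sketch of the expected call (illustrative; type-mangling suffix
       // elided, names are placeholders):
       //   call void @llvm.experimental.vector.histogram.add(
       //       <vscale x 4 x ptr> %ptrs, i32 1, <vscale x 4 x i1> %mask)
       // For every lane with an active mask bit, the increment is added to the
       // memory pointed to by that lane; duplicate pointers each contribute.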
6376   assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6377          "Tried to lower unsupported histogram type");
6378   SDLoc sdl = getCurSDLoc();
6379   Value *Ptr = I.getOperand(0);
6380   SDValue Inc = getValue(I.getOperand(1));
6381   SDValue Mask = getValue(I.getOperand(2));
6382 
6383   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6384   DataLayout TargetDL = DAG.getDataLayout();
6385   EVT VT = Inc.getValueType();
6386   Align Alignment = DAG.getEVTAlign(VT);
6387 
6388   const MDNode *Ranges = getRangeMetadata(I);
6389 
6390   SDValue Root = DAG.getRoot();
6391   SDValue Base;
6392   SDValue Index;
6393   ISD::MemIndexType IndexType;
6394   SDValue Scale;
6395   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
6396                                     I.getParent(), VT.getScalarStoreSize());
6397 
6398   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
6399 
6400   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6401       MachinePointerInfo(AS),
6402       MachineMemOperand::MOLoad | MachineMemOperand::MOStore,
6403       MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);
6404 
6405   if (!UniformBase) {
6406     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
6407     Index = getValue(Ptr);
6408     IndexType = ISD::SIGNED_SCALED;
6409     Scale =
6410         DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
6411   }
6412 
6413   EVT IdxVT = Index.getValueType();
6414   EVT EltTy = IdxVT.getVectorElementType();
6415   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
6416     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
6417     Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
6418   }
6419 
6420   SDValue ID = DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6421 
6422   SDValue Ops[] = {Root, Inc, Mask, Base, Index, Scale, ID};
6423   SDValue Histogram = DAG.getMaskedHistogram(DAG.getVTList(MVT::Other), VT, sdl,
6424                                              Ops, MMO, IndexType);
6425 
6426   setValue(&I, Histogram);
6427   DAG.setRoot(Histogram);
6428 }
6429 
6430 void SelectionDAGBuilder::visitVectorExtractLastActive(const CallInst &I,
6431                                                        unsigned Intrinsic) {
6432   assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6433          "Tried lowering invalid vector extract last");
6434   SDLoc sdl = getCurSDLoc();
6435   SDValue Data = getValue(I.getOperand(0));
6436   SDValue Mask = getValue(I.getOperand(1));
6437   SDValue PassThru = getValue(I.getOperand(2));
6438 
6439   EVT DataVT = Data.getValueType();
6440   EVT ScalarVT = PassThru.getValueType();
6441   EVT BoolVT = Mask.getValueType().getScalarType();
6442 
6443   // Find a suitable type for a stepvector.
6444   ConstantRange VScaleRange(1, /*isFullSet=*/true); // Dummy value.
6445   if (DataVT.isScalableVector())
6446     VScaleRange = getVScaleRange(I.getCaller(), 64);
6447   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6448   unsigned EltWidth = TLI.getBitWidthForCttzElements(
6449       I.getType(), DataVT.getVectorElementCount(), /*ZeroIsPoison=*/true,
6450       &VScaleRange);
6451   MVT StepVT = MVT::getIntegerVT(EltWidth);
6452   EVT StepVecVT = DataVT.changeVectorElementType(StepVT);
6453 
6454   // Zero out lanes with inactive elements, then find the highest remaining
6455   // value from the stepvector.
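       // For example (illustrative values), with Mask = <1,1,0,1> and
       // Data = <a,b,c,d>: the stepvector is <0,1,2,3>, masking inactive lanes
       // gives <0,1,0,3>, VECREDUCE_UMAX yields index 3, and lane 3 ("d") is
       // extracted. If no lane is active, the select below returns PassThru.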
6456   SDValue Zeroes = DAG.getConstant(0, sdl, StepVecVT);
6457   SDValue StepVec = DAG.getStepVector(sdl, StepVecVT);
6458   SDValue ActiveElts = DAG.getSelect(sdl, StepVecVT, Mask, StepVec, Zeroes);
6459   SDValue HighestIdx =
6460       DAG.getNode(ISD::VECREDUCE_UMAX, sdl, StepVT, ActiveElts);
6461 
6462   // Extract the corresponding lane from the data vector
6463   EVT ExtVT = TLI.getVectorIdxTy(DAG.getDataLayout());
6464   SDValue Idx = DAG.getZExtOrTrunc(HighestIdx, sdl, ExtVT);
6465   SDValue Extract =
6466       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, sdl, ScalarVT, Data, Idx);
6467 
6468   // If all mask lanes were inactive, choose the passthru value instead.
6469   SDValue AnyActive = DAG.getNode(ISD::VECREDUCE_OR, sdl, BoolVT, Mask);
6470   SDValue Result = DAG.getSelect(sdl, ScalarVT, AnyActive, Extract, PassThru);
6471   setValue(&I, Result);
6472 }
6473 
6474 /// Lower the call to the specified intrinsic function.
6475 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
6476                                              unsigned Intrinsic) {
6477   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6478   SDLoc sdl = getCurSDLoc();
6479   DebugLoc dl = getCurDebugLoc();
6480   SDValue Res;
6481 
6482   SDNodeFlags Flags;
6483   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
6484     Flags.copyFMF(*FPOp);
6485 
6486   switch (Intrinsic) {
6487   default:
6488     // By default, turn this into a target intrinsic node.
6489     visitTargetIntrinsic(I, Intrinsic);
6490     return;
6491   case Intrinsic::vscale: {
6492     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6493     setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
6494     return;
6495   }
6496   case Intrinsic::vastart:  visitVAStart(I); return;
6497   case Intrinsic::vaend:    visitVAEnd(I); return;
6498   case Intrinsic::vacopy:   visitVACopy(I); return;
6499   case Intrinsic::returnaddress:
6500     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
6501                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
6502                              getValue(I.getArgOperand(0))));
6503     return;
6504   case Intrinsic::addressofreturnaddress:
6505     setValue(&I,
6506              DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
6507                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6508     return;
6509   case Intrinsic::sponentry:
6510     setValue(&I,
6511              DAG.getNode(ISD::SPONENTRY, sdl,
6512                          TLI.getValueType(DAG.getDataLayout(), I.getType())));
6513     return;
6514   case Intrinsic::frameaddress:
6515     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
6516                              TLI.getFrameIndexTy(DAG.getDataLayout()),
6517                              getValue(I.getArgOperand(0))));
6518     return;
6519   case Intrinsic::read_volatile_register:
6520   case Intrinsic::read_register: {
6521     Value *Reg = I.getArgOperand(0);
6522     SDValue Chain = getRoot();
6523     SDValue RegName =
6524         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6525     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6526     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
6527       DAG.getVTList(VT, MVT::Other), Chain, RegName);
6528     setValue(&I, Res);
6529     DAG.setRoot(Res.getValue(1));
6530     return;
6531   }
6532   case Intrinsic::write_register: {
6533     Value *Reg = I.getArgOperand(0);
6534     Value *RegValue = I.getArgOperand(1);
6535     SDValue Chain = getRoot();
6536     SDValue RegName =
6537         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6538     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
6539                             RegName, getValue(RegValue)));
6540     return;
6541   }
6542   case Intrinsic::memcpy: {
6543     const auto &MCI = cast<MemCpyInst>(I);
6544     SDValue Op1 = getValue(I.getArgOperand(0));
6545     SDValue Op2 = getValue(I.getArgOperand(1));
6546     SDValue Op3 = getValue(I.getArgOperand(2));
6547     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
6548     Align DstAlign = MCI.getDestAlign().valueOrOne();
6549     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6550     Align Alignment = std::min(DstAlign, SrcAlign);
6551     bool isVol = MCI.isVolatile();
6552     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6553     // node.
6554     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6555     SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6556                                /* AlwaysInline */ false, &I, std::nullopt,
6557                                MachinePointerInfo(I.getArgOperand(0)),
6558                                MachinePointerInfo(I.getArgOperand(1)),
6559                                I.getAAMetadata(), AA);
6560     updateDAGForMaybeTailCall(MC);
6561     return;
6562   }
6563   case Intrinsic::memcpy_inline: {
6564     const auto &MCI = cast<MemCpyInlineInst>(I);
6565     SDValue Dst = getValue(I.getArgOperand(0));
6566     SDValue Src = getValue(I.getArgOperand(1));
6567     SDValue Size = getValue(I.getArgOperand(2));
6568     assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
6569     // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
6570     Align DstAlign = MCI.getDestAlign().valueOrOne();
6571     Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6572     Align Alignment = std::min(DstAlign, SrcAlign);
6573     bool isVol = MCI.isVolatile();
6574     // FIXME: Support passing different dest/src alignments to the memcpy DAG
6575     // node.
6576     SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
6577                                /* AlwaysInline */ true, &I, std::nullopt,
6578                                MachinePointerInfo(I.getArgOperand(0)),
6579                                MachinePointerInfo(I.getArgOperand(1)),
6580                                I.getAAMetadata(), AA);
6581     updateDAGForMaybeTailCall(MC);
6582     return;
6583   }
6584   case Intrinsic::memset: {
6585     const auto &MSI = cast<MemSetInst>(I);
6586     SDValue Op1 = getValue(I.getArgOperand(0));
6587     SDValue Op2 = getValue(I.getArgOperand(1));
6588     SDValue Op3 = getValue(I.getArgOperand(2));
6589     // @llvm.memset defines 0 and 1 to both mean no alignment.
6590     Align Alignment = MSI.getDestAlign().valueOrOne();
6591     bool isVol = MSI.isVolatile();
6592     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6593     SDValue MS = DAG.getMemset(
6594         Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
6595         &I, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
6596     updateDAGForMaybeTailCall(MS);
6597     return;
6598   }
6599   case Intrinsic::memset_inline: {
6600     const auto &MSII = cast<MemSetInlineInst>(I);
6601     SDValue Dst = getValue(I.getArgOperand(0));
6602     SDValue Value = getValue(I.getArgOperand(1));
6603     SDValue Size = getValue(I.getArgOperand(2));
6604     assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
6605     // @llvm.memset.inline defines 0 and 1 to both mean no alignment.
6606     Align DstAlign = MSII.getDestAlign().valueOrOne();
6607     bool isVol = MSII.isVolatile();
6608     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6609     SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
6610                                /* AlwaysInline */ true, &I,
6611                                MachinePointerInfo(I.getArgOperand(0)),
6612                                I.getAAMetadata());
6613     updateDAGForMaybeTailCall(MC);
6614     return;
6615   }
6616   case Intrinsic::memmove: {
6617     const auto &MMI = cast<MemMoveInst>(I);
6618     SDValue Op1 = getValue(I.getArgOperand(0));
6619     SDValue Op2 = getValue(I.getArgOperand(1));
6620     SDValue Op3 = getValue(I.getArgOperand(2));
6621     // @llvm.memmove defines 0 and 1 to both mean no alignment.
6622     Align DstAlign = MMI.getDestAlign().valueOrOne();
6623     Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6624     Align Alignment = std::min(DstAlign, SrcAlign);
6625     bool isVol = MMI.isVolatile();
6626     // FIXME: Support passing different dest/src alignments to the memmove DAG
6627     // node.
6628     SDValue Root = isVol ? getRoot() : getMemoryRoot();
6629     SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &I,
6630                                 /* OverrideTailCall */ std::nullopt,
6631                                 MachinePointerInfo(I.getArgOperand(0)),
6632                                 MachinePointerInfo(I.getArgOperand(1)),
6633                                 I.getAAMetadata(), AA);
6634     updateDAGForMaybeTailCall(MM);
6635     return;
6636   }
6637   case Intrinsic::memcpy_element_unordered_atomic: {
6638     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
6639     SDValue Dst = getValue(MI.getRawDest());
6640     SDValue Src = getValue(MI.getRawSource());
6641     SDValue Length = getValue(MI.getLength());
6642 
6643     Type *LengthTy = MI.getLength()->getType();
6644     unsigned ElemSz = MI.getElementSizeInBytes();
6645     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6646     SDValue MC =
6647         DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6648                             isTC, MachinePointerInfo(MI.getRawDest()),
6649                             MachinePointerInfo(MI.getRawSource()));
6650     updateDAGForMaybeTailCall(MC);
6651     return;
6652   }
6653   case Intrinsic::memmove_element_unordered_atomic: {
6654     auto &MI = cast<AtomicMemMoveInst>(I);
6655     SDValue Dst = getValue(MI.getRawDest());
6656     SDValue Src = getValue(MI.getRawSource());
6657     SDValue Length = getValue(MI.getLength());
6658 
6659     Type *LengthTy = MI.getLength()->getType();
6660     unsigned ElemSz = MI.getElementSizeInBytes();
6661     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6662     SDValue MC =
6663         DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6664                              isTC, MachinePointerInfo(MI.getRawDest()),
6665                              MachinePointerInfo(MI.getRawSource()));
6666     updateDAGForMaybeTailCall(MC);
6667     return;
6668   }
6669   case Intrinsic::memset_element_unordered_atomic: {
6670     auto &MI = cast<AtomicMemSetInst>(I);
6671     SDValue Dst = getValue(MI.getRawDest());
6672     SDValue Val = getValue(MI.getValue());
6673     SDValue Length = getValue(MI.getLength());
6674 
6675     Type *LengthTy = MI.getLength()->getType();
6676     unsigned ElemSz = MI.getElementSizeInBytes();
6677     bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6678     SDValue MC =
6679         DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
6680                             isTC, MachinePointerInfo(MI.getRawDest()));
6681     updateDAGForMaybeTailCall(MC);
6682     return;
6683   }
6684   case Intrinsic::call_preallocated_setup: {
6685     const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6686     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6687     SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6688                               getRoot(), SrcValue);
6689     setValue(&I, Res);
6690     DAG.setRoot(Res);
6691     return;
6692   }
6693   case Intrinsic::call_preallocated_arg: {
6694     const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6695     SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6696     SDValue Ops[3];
6697     Ops[0] = getRoot();
6698     Ops[1] = SrcValue;
6699     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6700                                    MVT::i32); // arg index
6701     SDValue Res = DAG.getNode(
6702         ISD::PREALLOCATED_ARG, sdl,
6703         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6704     setValue(&I, Res);
6705     DAG.setRoot(Res.getValue(1));
6706     return;
6707   }
6708   case Intrinsic::dbg_declare: {
6709     const auto &DI = cast<DbgDeclareInst>(I);
6710     // Debug intrinsics are handled separately in assignment tracking mode.
6711     // Some intrinsics are handled right after Argument lowering.
6712     if (AssignmentTrackingEnabled ||
6713         FuncInfo.PreprocessedDbgDeclares.count(&DI))
6714       return;
6715     LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DI << "\n");
6716     DILocalVariable *Variable = DI.getVariable();
6717     DIExpression *Expression = DI.getExpression();
6718     dropDanglingDebugInfo(Variable, Expression);
6719     // Assume dbg.declare cannot currently use DIArgList, i.e. it is
6720     // non-variadic.
6721     assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
6722     handleDebugDeclare(DI.getVariableLocationOp(0), Variable, Expression,
6723                        DI.getDebugLoc());
6724     return;
6725   }
6726   case Intrinsic::dbg_label: {
6727     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
6728     DILabel *Label = DI.getLabel();
6729     assert(Label && "Missing label");
6730 
6731     SDDbgLabel *SDV;
6732     SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
6733     DAG.AddDbgLabel(SDV);
6734     return;
6735   }
6736   case Intrinsic::dbg_assign: {
6737     // Debug intrinsics are handled separately in assignment tracking mode.
6738     if (AssignmentTrackingEnabled)
6739       return;
6740     // If assignment tracking hasn't been enabled then fall through and treat
6741     // the dbg.assign as a dbg.value.
6742     [[fallthrough]];
6743   }
6744   case Intrinsic::dbg_value: {
6745     // Debug intrinsics are handled separately in assignment tracking mode.
6746     if (AssignmentTrackingEnabled)
6747       return;
6748     const DbgValueInst &DI = cast<DbgValueInst>(I);
6749     assert(DI.getVariable() && "Missing variable");
6750 
6751     DILocalVariable *Variable = DI.getVariable();
6752     DIExpression *Expression = DI.getExpression();
6753     dropDanglingDebugInfo(Variable, Expression);
6754 
6755     if (DI.isKillLocation()) {
6756       handleKillDebugValue(Variable, Expression, DI.getDebugLoc(), SDNodeOrder);
6757       return;
6758     }
6759 
6760     SmallVector<Value *, 4> Values(DI.getValues());
6761     if (Values.empty())
6762       return;
6763 
6764     bool IsVariadic = DI.hasArgList();
6765     if (!handleDebugValue(Values, Variable, Expression, DI.getDebugLoc(),
6766                           SDNodeOrder, IsVariadic))
6767       addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
6768                            DI.getDebugLoc(), SDNodeOrder);
6769     return;
6770   }
6771 
6772   case Intrinsic::eh_typeid_for: {
6773     // Find the type id for the given typeinfo.
6774     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6775     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6776     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6777     setValue(&I, Res);
6778     return;
6779   }
6780 
6781   case Intrinsic::eh_return_i32:
6782   case Intrinsic::eh_return_i64:
6783     DAG.getMachineFunction().setCallsEHReturn(true);
6784     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6785                             MVT::Other,
6786                             getControlRoot(),
6787                             getValue(I.getArgOperand(0)),
6788                             getValue(I.getArgOperand(1))));
6789     return;
6790   case Intrinsic::eh_unwind_init:
6791     DAG.getMachineFunction().setCallsUnwindInit(true);
6792     return;
6793   case Intrinsic::eh_dwarf_cfa:
6794     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6795                              TLI.getPointerTy(DAG.getDataLayout()),
6796                              getValue(I.getArgOperand(0))));
6797     return;
6798   case Intrinsic::eh_sjlj_callsite: {
6799     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
6800     assert(FuncInfo.getCurrentCallSite() == 0 && "Overlapping call sites!");
6801 
6802     FuncInfo.setCurrentCallSite(CI->getZExtValue());
6803     return;
6804   }
6805   case Intrinsic::eh_sjlj_functioncontext: {
6806     // Get and store the index of the function context.
6807     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6808     AllocaInst *FnCtx =
6809       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6810     int FI = FuncInfo.StaticAllocaMap[FnCtx];
6811     MFI.setFunctionContextIndex(FI);
6812     return;
6813   }
6814   case Intrinsic::eh_sjlj_setjmp: {
6815     SDValue Ops[2];
6816     Ops[0] = getRoot();
6817     Ops[1] = getValue(I.getArgOperand(0));
6818     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6819                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
6820     setValue(&I, Op.getValue(0));
6821     DAG.setRoot(Op.getValue(1));
6822     return;
6823   }
6824   case Intrinsic::eh_sjlj_longjmp:
6825     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6826                             getRoot(), getValue(I.getArgOperand(0))));
6827     return;
6828   case Intrinsic::eh_sjlj_setup_dispatch:
6829     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6830                             getRoot()));
6831     return;
6832   case Intrinsic::masked_gather:
6833     visitMaskedGather(I);
6834     return;
6835   case Intrinsic::masked_load:
6836     visitMaskedLoad(I);
6837     return;
6838   case Intrinsic::masked_scatter:
6839     visitMaskedScatter(I);
6840     return;
6841   case Intrinsic::masked_store:
6842     visitMaskedStore(I);
6843     return;
6844   case Intrinsic::masked_expandload:
6845     visitMaskedLoad(I, true /* IsExpanding */);
6846     return;
6847   case Intrinsic::masked_compressstore:
6848     visitMaskedStore(I, true /* IsCompressing */);
6849     return;
6850   case Intrinsic::powi:
6851     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6852                             getValue(I.getArgOperand(1)), DAG));
6853     return;
6854   case Intrinsic::log:
6855     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6856     return;
6857   case Intrinsic::log2:
6858     setValue(&I,
6859              expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6860     return;
6861   case Intrinsic::log10:
6862     setValue(&I,
6863              expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6864     return;
6865   case Intrinsic::exp:
6866     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6867     return;
6868   case Intrinsic::exp2:
6869     setValue(&I,
6870              expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6871     return;
6872   case Intrinsic::pow:
6873     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6874                            getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6875     return;
6876   case Intrinsic::sqrt:
6877   case Intrinsic::fabs:
6878   case Intrinsic::sin:
6879   case Intrinsic::cos:
6880   case Intrinsic::tan:
6881   case Intrinsic::asin:
6882   case Intrinsic::acos:
6883   case Intrinsic::atan:
6884   case Intrinsic::sinh:
6885   case Intrinsic::cosh:
6886   case Intrinsic::tanh:
6887   case Intrinsic::exp10:
6888   case Intrinsic::floor:
6889   case Intrinsic::ceil:
6890   case Intrinsic::trunc:
6891   case Intrinsic::rint:
6892   case Intrinsic::nearbyint:
6893   case Intrinsic::round:
6894   case Intrinsic::roundeven:
6895   case Intrinsic::canonicalize: {
6896     unsigned Opcode;
6897     // clang-format off
6898     switch (Intrinsic) {
6899     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6900     case Intrinsic::sqrt:         Opcode = ISD::FSQRT;         break;
6901     case Intrinsic::fabs:         Opcode = ISD::FABS;          break;
6902     case Intrinsic::sin:          Opcode = ISD::FSIN;          break;
6903     case Intrinsic::cos:          Opcode = ISD::FCOS;          break;
6904     case Intrinsic::tan:          Opcode = ISD::FTAN;          break;
6905     case Intrinsic::asin:         Opcode = ISD::FASIN;         break;
6906     case Intrinsic::acos:         Opcode = ISD::FACOS;         break;
6907     case Intrinsic::atan:         Opcode = ISD::FATAN;         break;
6908     case Intrinsic::sinh:         Opcode = ISD::FSINH;         break;
6909     case Intrinsic::cosh:         Opcode = ISD::FCOSH;         break;
6910     case Intrinsic::tanh:         Opcode = ISD::FTANH;         break;
6911     case Intrinsic::exp10:        Opcode = ISD::FEXP10;        break;
6912     case Intrinsic::floor:        Opcode = ISD::FFLOOR;        break;
6913     case Intrinsic::ceil:         Opcode = ISD::FCEIL;         break;
6914     case Intrinsic::trunc:        Opcode = ISD::FTRUNC;        break;
6915     case Intrinsic::rint:         Opcode = ISD::FRINT;         break;
6916     case Intrinsic::nearbyint:    Opcode = ISD::FNEARBYINT;    break;
6917     case Intrinsic::round:        Opcode = ISD::FROUND;        break;
6918     case Intrinsic::roundeven:    Opcode = ISD::FROUNDEVEN;    break;
6919     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6920     }
6921     // clang-format on
6922 
6923     setValue(&I, DAG.getNode(Opcode, sdl,
6924                              getValue(I.getArgOperand(0)).getValueType(),
6925                              getValue(I.getArgOperand(0)), Flags));
6926     return;
6927   }
6928   case Intrinsic::atan2:
6929     setValue(&I, DAG.getNode(ISD::FATAN2, sdl,
6930                              getValue(I.getArgOperand(0)).getValueType(),
6931                              getValue(I.getArgOperand(0)),
6932                              getValue(I.getArgOperand(1)), Flags));
6933     return;
6934   case Intrinsic::lround:
6935   case Intrinsic::llround:
6936   case Intrinsic::lrint:
6937   case Intrinsic::llrint: {
6938     unsigned Opcode;
6939     // clang-format off
6940     switch (Intrinsic) {
6941     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6942     case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
6943     case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6944     case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
6945     case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
6946     }
6947     // clang-format on
6948 
6949     EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6950     setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6951                              getValue(I.getArgOperand(0))));
6952     return;
6953   }
6954   case Intrinsic::minnum:
6955     setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6956                              getValue(I.getArgOperand(0)).getValueType(),
6957                              getValue(I.getArgOperand(0)),
6958                              getValue(I.getArgOperand(1)), Flags));
6959     return;
6960   case Intrinsic::maxnum:
6961     setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6962                              getValue(I.getArgOperand(0)).getValueType(),
6963                              getValue(I.getArgOperand(0)),
6964                              getValue(I.getArgOperand(1)), Flags));
6965     return;
6966   case Intrinsic::minimum:
6967     setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6968                              getValue(I.getArgOperand(0)).getValueType(),
6969                              getValue(I.getArgOperand(0)),
6970                              getValue(I.getArgOperand(1)), Flags));
6971     return;
6972   case Intrinsic::maximum:
6973     setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6974                              getValue(I.getArgOperand(0)).getValueType(),
6975                              getValue(I.getArgOperand(0)),
6976                              getValue(I.getArgOperand(1)), Flags));
6977     return;
6978   case Intrinsic::minimumnum:
6979     setValue(&I, DAG.getNode(ISD::FMINIMUMNUM, sdl,
6980                              getValue(I.getArgOperand(0)).getValueType(),
6981                              getValue(I.getArgOperand(0)),
6982                              getValue(I.getArgOperand(1)), Flags));
6983     return;
6984   case Intrinsic::maximumnum:
6985     setValue(&I, DAG.getNode(ISD::FMAXIMUMNUM, sdl,
6986                              getValue(I.getArgOperand(0)).getValueType(),
6987                              getValue(I.getArgOperand(0)),
6988                              getValue(I.getArgOperand(1)), Flags));
6989     return;
6990   case Intrinsic::copysign:
6991     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6992                              getValue(I.getArgOperand(0)).getValueType(),
6993                              getValue(I.getArgOperand(0)),
6994                              getValue(I.getArgOperand(1)), Flags));
6995     return;
6996   case Intrinsic::ldexp:
6997     setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
6998                              getValue(I.getArgOperand(0)).getValueType(),
6999                              getValue(I.getArgOperand(0)),
7000                              getValue(I.getArgOperand(1)), Flags));
7001     return;
7002   case Intrinsic::sincos:
7003   case Intrinsic::frexp: {
7004     unsigned Opcode;
7005     switch (Intrinsic) {
7006     default:
7007       llvm_unreachable("unexpected intrinsic");
7008     case Intrinsic::sincos:
7009       Opcode = ISD::FSINCOS;
7010       break;
7011     case Intrinsic::frexp:
7012       Opcode = ISD::FFREXP;
7013       break;
7014     }
7015     SmallVector<EVT, 2> ValueVTs;
7016     ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
7017     SDVTList VTs = DAG.getVTList(ValueVTs);
7018     setValue(
7019         &I, DAG.getNode(Opcode, sdl, VTs, getValue(I.getArgOperand(0)), Flags));
7020     return;
7021   }
7022   case Intrinsic::arithmetic_fence: {
7023     setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
7024                              getValue(I.getArgOperand(0)).getValueType(),
7025                              getValue(I.getArgOperand(0)), Flags));
7026     return;
7027   }
7028   case Intrinsic::fma:
7029     setValue(&I, DAG.getNode(
7030                      ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
7031                      getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
7032                      getValue(I.getArgOperand(2)), Flags));
7033     return;
7034 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
7035   case Intrinsic::INTRINSIC:
7036 #include "llvm/IR/ConstrainedOps.def"
7037     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
7038     return;
7039 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
7040 #include "llvm/IR/VPIntrinsics.def"
7041     visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
7042     return;
7043   case Intrinsic::fptrunc_round: {
7044     // Get the last argument (the rounding-mode metadata) and convert it to
7045     // an integer constant operand for the node.
7046     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
7047     std::optional<RoundingMode> RoundMode =
7048         convertStrToRoundingMode(cast<MDString>(MD)->getString());
7049 
7050     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7051 
7052     // Propagate fast-math-flags from IR to node(s).
7053     SDNodeFlags Flags;
7054     Flags.copyFMF(*cast<FPMathOperator>(&I));
7055     SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
7056 
7057     SDValue Result;
7058     Result = DAG.getNode(
7059         ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
7060         DAG.getTargetConstant((int)*RoundMode, sdl, MVT::i32));
7061     setValue(&I, Result);
7062 
7063     return;
7064   }
7065   case Intrinsic::fmuladd: {
7066     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7067     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
7068         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
7069       setValue(&I, DAG.getNode(ISD::FMA, sdl,
7070                                getValue(I.getArgOperand(0)).getValueType(),
7071                                getValue(I.getArgOperand(0)),
7072                                getValue(I.getArgOperand(1)),
7073                                getValue(I.getArgOperand(2)), Flags));
7074     } else {
7075       // TODO: Intrinsic calls should have fast-math-flags.
7076       SDValue Mul = DAG.getNode(
7077           ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
7078           getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
7079       SDValue Add = DAG.getNode(ISD::FADD, sdl,
7080                                 getValue(I.getArgOperand(0)).getValueType(),
7081                                 Mul, getValue(I.getArgOperand(2)), Flags);
7082       setValue(&I, Add);
7083     }
7084     return;
7085   }
7086   case Intrinsic::convert_to_fp16:
7087     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
7088                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
7089                                          getValue(I.getArgOperand(0)),
7090                                          DAG.getTargetConstant(0, sdl,
7091                                                                MVT::i32))));
7092     return;
7093   case Intrinsic::convert_from_fp16:
7094     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
7095                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
7096                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
7097                                          getValue(I.getArgOperand(0)))));
7098     return;
7099   case Intrinsic::fptosi_sat: {
7100     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7101     setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
7102                              getValue(I.getArgOperand(0)),
7103                              DAG.getValueType(VT.getScalarType())));
7104     return;
7105   }
7106   case Intrinsic::fptoui_sat: {
7107     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7108     setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
7109                              getValue(I.getArgOperand(0)),
7110                              DAG.getValueType(VT.getScalarType())));
7111     return;
7112   }
7113   case Intrinsic::set_rounding:
7114     Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
7115                       {getRoot(), getValue(I.getArgOperand(0))});
7116     setValue(&I, Res);
7117     DAG.setRoot(Res.getValue(0));
7118     return;
7119   case Intrinsic::is_fpclass: {
7120     const DataLayout DLayout = DAG.getDataLayout();
7121     EVT DestVT = TLI.getValueType(DLayout, I.getType());
7122     EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
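         // The second operand is an immediate FPClassTest bitmask; e.g.
         // (illustrative) a value of 3 (fcSNan | fcQNan) tests for any NaN.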
7123     FPClassTest Test = static_cast<FPClassTest>(
7124         cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
7125     MachineFunction &MF = DAG.getMachineFunction();
7126     const Function &F = MF.getFunction();
7127     SDValue Op = getValue(I.getArgOperand(0));
7128     SDNodeFlags Flags;
7129     Flags.setNoFPExcept(
7130         !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7131     // If ISD::IS_FPCLASS should be expanded, do it right now, because the
7132     // expansion can use illegal types. Expanding early allows these types
7133     // to be legalized prior to selection.
7134     if (!TLI.isOperationLegal(ISD::IS_FPCLASS, ArgVT) &&
7135         !TLI.isOperationCustom(ISD::IS_FPCLASS, ArgVT)) {
7136       SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
7137       setValue(&I, Result);
7138       return;
7139     }
7140 
7141     SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
7142     SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
7143     setValue(&I, V);
7144     return;
7145   }
7146   case Intrinsic::get_fpenv: {
7147     const DataLayout DLayout = DAG.getDataLayout();
7148     EVT EnvVT = TLI.getValueType(DLayout, I.getType());
7149     Align TempAlign = DAG.getEVTAlign(EnvVT);
7150     SDValue Chain = getRoot();
7151     // Use GET_FPENV if it is legal or custom. Otherwise use a memory-based
7152     // node and temporary storage on the stack.
7153     if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
7154       Res = DAG.getNode(
7155           ISD::GET_FPENV, sdl,
7156           DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7157                         MVT::Other),
7158           Chain);
7159     } else {
7160       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
7161       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7162       auto MPI =
7163           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
7164       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7165           MPI, MachineMemOperand::MOStore, LocationSize::beforeOrAfterPointer(),
7166           TempAlign);
7167       Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7168       Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7169     }
7170     setValue(&I, Res);
7171     DAG.setRoot(Res.getValue(1));
7172     return;
7173   }
7174   case Intrinsic::set_fpenv: {
7175     const DataLayout DLayout = DAG.getDataLayout();
7176     SDValue Env = getValue(I.getArgOperand(0));
7177     EVT EnvVT = Env.getValueType();
7178     Align TempAlign = DAG.getEVTAlign(EnvVT);
7179     SDValue Chain = getRoot();
7180     // If SET_FPENV is legal or custom, use it. Otherwise store the
7181     // environment into a stack temporary and set it with SET_FPENV_MEM.
7182     if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
7183       Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
7184     } else {
7185       // Allocate space on the stack, copy the environment bits into it, and
7186       // use this memory in SET_FPENV_MEM.
7187       SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
7188       int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7189       auto MPI =
7190           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
7191       Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7192                            MachineMemOperand::MOStore);
7193       MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7194           MPI, MachineMemOperand::MOLoad, LocationSize::beforeOrAfterPointer(),
7195           TempAlign);
7196       Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7197     }
7198     DAG.setRoot(Chain);
7199     return;
7200   }
7201   case Intrinsic::reset_fpenv:
7202     DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
7203     return;
7204   case Intrinsic::get_fpmode:
7205     Res = DAG.getNode(
7206         ISD::GET_FPMODE, sdl,
7207         DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7208                       MVT::Other),
7209         DAG.getRoot());
7210     setValue(&I, Res);
7211     DAG.setRoot(Res.getValue(1));
7212     return;
7213   case Intrinsic::set_fpmode:
7214     Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
7215                       getValue(I.getArgOperand(0)));
7216     DAG.setRoot(Res);
7217     return;
7218   case Intrinsic::reset_fpmode: {
7219     Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
7220     DAG.setRoot(Res);
7221     return;
7222   }
7223   case Intrinsic::pcmarker: {
7224     SDValue Tmp = getValue(I.getArgOperand(0));
7225     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
7226     return;
7227   }
7228   case Intrinsic::readcyclecounter: {
7229     SDValue Op = getRoot();
7230     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
7231                       DAG.getVTList(MVT::i64, MVT::Other), Op);
7232     setValue(&I, Res);
7233     DAG.setRoot(Res.getValue(1));
7234     return;
7235   }
7236   case Intrinsic::readsteadycounter: {
7237     SDValue Op = getRoot();
7238     Res = DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
7239                       DAG.getVTList(MVT::i64, MVT::Other), Op);
7240     setValue(&I, Res);
7241     DAG.setRoot(Res.getValue(1));
7242     return;
7243   }
7244   case Intrinsic::bitreverse:
7245     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
7246                              getValue(I.getArgOperand(0)).getValueType(),
7247                              getValue(I.getArgOperand(0))));
7248     return;
7249   case Intrinsic::bswap:
7250     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
7251                              getValue(I.getArgOperand(0)).getValueType(),
7252                              getValue(I.getArgOperand(0))));
7253     return;
7254   case Intrinsic::cttz: {
7255     SDValue Arg = getValue(I.getArgOperand(0));
7256     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
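         // The second operand is the is_zero_poison flag; when it is nonzero a
         // zero input is poison, so the ZERO_UNDEF variant can be used.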
7257     EVT Ty = Arg.getValueType();
7258     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
7259                              sdl, Ty, Arg));
7260     return;
7261   }
7262   case Intrinsic::ctlz: {
7263     SDValue Arg = getValue(I.getArgOperand(0));
7264     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
7265     EVT Ty = Arg.getValueType();
7266     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
7267                              sdl, Ty, Arg));
7268     return;
7269   }
7270   case Intrinsic::ctpop: {
7271     SDValue Arg = getValue(I.getArgOperand(0));
7272     EVT Ty = Arg.getValueType();
7273     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
7274     return;
7275   }
7276   case Intrinsic::fshl:
7277   case Intrinsic::fshr: {
7278     bool IsFSHL = Intrinsic == Intrinsic::fshl;
7279     SDValue X = getValue(I.getArgOperand(0));
7280     SDValue Y = getValue(I.getArgOperand(1));
7281     SDValue Z = getValue(I.getArgOperand(2));
7282     EVT VT = X.getValueType();
7283 
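         // When both value operands are the same, the funnel shift is a rotate,
         // e.g. (illustrative) fshl(x, x, z) is rotl(x, z).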
7284     if (X == Y) {
7285       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
7286       setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
7287     } else {
7288       auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
7289       setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
7290     }
7291     return;
7292   }
7293   case Intrinsic::sadd_sat: {
7294     SDValue Op1 = getValue(I.getArgOperand(0));
7295     SDValue Op2 = getValue(I.getArgOperand(1));
7296     setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
7297     return;
7298   }
7299   case Intrinsic::uadd_sat: {
7300     SDValue Op1 = getValue(I.getArgOperand(0));
7301     SDValue Op2 = getValue(I.getArgOperand(1));
7302     setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
7303     return;
7304   }
7305   case Intrinsic::ssub_sat: {
7306     SDValue Op1 = getValue(I.getArgOperand(0));
7307     SDValue Op2 = getValue(I.getArgOperand(1));
7308     setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
7309     return;
7310   }
7311   case Intrinsic::usub_sat: {
7312     SDValue Op1 = getValue(I.getArgOperand(0));
7313     SDValue Op2 = getValue(I.getArgOperand(1));
7314     setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
7315     return;
7316   }
7317   case Intrinsic::sshl_sat: {
7318     SDValue Op1 = getValue(I.getArgOperand(0));
7319     SDValue Op2 = getValue(I.getArgOperand(1));
7320     setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
7321     return;
7322   }
7323   case Intrinsic::ushl_sat: {
7324     SDValue Op1 = getValue(I.getArgOperand(0));
7325     SDValue Op2 = getValue(I.getArgOperand(1));
7326     setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
7327     return;
7328   }
7329   case Intrinsic::smul_fix:
7330   case Intrinsic::umul_fix:
7331   case Intrinsic::smul_fix_sat:
7332   case Intrinsic::umul_fix_sat: {
7333     SDValue Op1 = getValue(I.getArgOperand(0));
7334     SDValue Op2 = getValue(I.getArgOperand(1));
7335     SDValue Op3 = getValue(I.getArgOperand(2));
7336     setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
7337                              Op1.getValueType(), Op1, Op2, Op3));
7338     return;
7339   }
7340   case Intrinsic::sdiv_fix:
7341   case Intrinsic::udiv_fix:
7342   case Intrinsic::sdiv_fix_sat:
7343   case Intrinsic::udiv_fix_sat: {
7344     SDValue Op1 = getValue(I.getArgOperand(0));
7345     SDValue Op2 = getValue(I.getArgOperand(1));
7346     SDValue Op3 = getValue(I.getArgOperand(2));
7347     setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
7348                               Op1, Op2, Op3, DAG, TLI));
7349     return;
7350   }
7351   case Intrinsic::smax: {
7352     SDValue Op1 = getValue(I.getArgOperand(0));
7353     SDValue Op2 = getValue(I.getArgOperand(1));
7354     setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
7355     return;
7356   }
7357   case Intrinsic::smin: {
7358     SDValue Op1 = getValue(I.getArgOperand(0));
7359     SDValue Op2 = getValue(I.getArgOperand(1));
7360     setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
7361     return;
7362   }
7363   case Intrinsic::umax: {
7364     SDValue Op1 = getValue(I.getArgOperand(0));
7365     SDValue Op2 = getValue(I.getArgOperand(1));
7366     setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
7367     return;
7368   }
7369   case Intrinsic::umin: {
7370     SDValue Op1 = getValue(I.getArgOperand(0));
7371     SDValue Op2 = getValue(I.getArgOperand(1));
7372     setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
7373     return;
7374   }
7375   case Intrinsic::abs: {
7376     // TODO: Preserve "int min is poison" arg in SDAG?
7377     SDValue Op1 = getValue(I.getArgOperand(0));
7378     setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
7379     return;
7380   }
7381   case Intrinsic::scmp: {
7382     SDValue Op1 = getValue(I.getArgOperand(0));
7383     SDValue Op2 = getValue(I.getArgOperand(1));
7384     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7385     setValue(&I, DAG.getNode(ISD::SCMP, sdl, DestVT, Op1, Op2));
7386     break;
7387   }
7388   case Intrinsic::ucmp: {
7389     SDValue Op1 = getValue(I.getArgOperand(0));
7390     SDValue Op2 = getValue(I.getArgOperand(1));
7391     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7392     setValue(&I, DAG.getNode(ISD::UCMP, sdl, DestVT, Op1, Op2));
7393     break;
7394   }
7395   case Intrinsic::stacksave: {
7396     SDValue Op = getRoot();
7397     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7398     Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
7399     setValue(&I, Res);
7400     DAG.setRoot(Res.getValue(1));
7401     return;
7402   }
7403   case Intrinsic::stackrestore:
7404     Res = getValue(I.getArgOperand(0));
7405     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
7406     return;
7407   case Intrinsic::get_dynamic_area_offset: {
7408     SDValue Op = getRoot();
7409     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
7410     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7411     // The result type for @llvm.get.dynamic.area.offset should match the
7412     // target's PtrTy.
7413     if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
7414       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
7415                          " intrinsic!");
7416     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
7417                       Op);
7418     DAG.setRoot(Op);
7419     setValue(&I, Res);
7420     return;
7421   }
7422   case Intrinsic::stackguard: {
7423     MachineFunction &MF = DAG.getMachineFunction();
7424     const Module &M = *MF.getFunction().getParent();
7425     EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7426     SDValue Chain = getRoot();
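         // Either emit the target's LOAD_STACK_GUARD pseudo-instruction or fall
         // back to a volatile load of the module's stack-guard global.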
7427     if (TLI.useLoadStackGuardNode(M)) {
7428       Res = getLoadStackGuard(DAG, sdl, Chain);
7429       Res = DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7430     } else {
7431       const Value *Global = TLI.getSDagStackGuard(M);
7432       Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
7433       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
7434                         MachinePointerInfo(Global, 0), Align,
7435                         MachineMemOperand::MOVolatile);
7436     }
7437     if (TLI.useStackGuardXorFP())
7438       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
7439     DAG.setRoot(Chain);
7440     setValue(&I, Res);
7441     return;
7442   }
7443   case Intrinsic::stackprotector: {
7444     // Emit code into the DAG to store the stack guard onto the stack.
7445     MachineFunction &MF = DAG.getMachineFunction();
7446     MachineFrameInfo &MFI = MF.getFrameInfo();
7447     const Module &M = *MF.getFunction().getParent();
7448     SDValue Src, Chain = getRoot();
7449 
7450     if (TLI.useLoadStackGuardNode(M))
7451       Src = getLoadStackGuard(DAG, sdl, Chain);
7452     else
7453       Src = getValue(I.getArgOperand(0));   // The guard's value.
7454 
7455     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
7456 
7457     int FI = FuncInfo.StaticAllocaMap[Slot];
7458     MFI.setStackProtectorIndex(FI);
7459     EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
7460 
7461     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
7462 
7463     // Store the stack protector onto the stack.
7464     Res = DAG.getStore(
7465         Chain, sdl, Src, FIN,
7466         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
7467         MaybeAlign(), MachineMemOperand::MOVolatile);
7468     setValue(&I, Res);
7469     DAG.setRoot(Res);
7470     return;
7471   }
7472   case Intrinsic::objectsize:
7473     llvm_unreachable("llvm.objectsize.* should have been lowered already");
7474 
7475   case Intrinsic::is_constant:
7476     llvm_unreachable("llvm.is.constant.* should have been lowered already");
7477 
7478   case Intrinsic::annotation:
7479   case Intrinsic::ptr_annotation:
7480   case Intrinsic::launder_invariant_group:
7481   case Intrinsic::strip_invariant_group:
7482     // Drop the intrinsic, but forward the value
7483     setValue(&I, getValue(I.getOperand(0)));
7484     return;
7485 
7486   case Intrinsic::assume:
7487   case Intrinsic::experimental_noalias_scope_decl:
7488   case Intrinsic::var_annotation:
7489   case Intrinsic::sideeffect:
7490     // Discard annotate attributes, noalias scope declarations, assumptions, and
7491     // artificial side-effects.
7492     return;
7493 
7494   case Intrinsic::codeview_annotation: {
7495     // Emit a label associated with this metadata.
7496     MachineFunction &MF = DAG.getMachineFunction();
7497     MCSymbol *Label = MF.getContext().createTempSymbol("annotation", true);
7498     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
7499     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
7500     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
7501     DAG.setRoot(Res);
7502     return;
7503   }
7504 
7505   case Intrinsic::init_trampoline: {
7506     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
7507 
7508     SDValue Ops[6];
7509     Ops[0] = getRoot();
7510     Ops[1] = getValue(I.getArgOperand(0));
7511     Ops[2] = getValue(I.getArgOperand(1));
7512     Ops[3] = getValue(I.getArgOperand(2));
7513     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
7514     Ops[5] = DAG.getSrcValue(F);
7515 
7516     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
7517 
7518     DAG.setRoot(Res);
7519     return;
7520   }
7521   case Intrinsic::adjust_trampoline:
7522     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
7523                              TLI.getPointerTy(DAG.getDataLayout()),
7524                              getValue(I.getArgOperand(0))));
7525     return;
7526   case Intrinsic::gcroot: {
7527     assert(DAG.getMachineFunction().getFunction().hasGC() &&
7528            "only valid in functions with gc specified, enforced by Verifier");
7529     assert(GFI && "implied by previous");
7530     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
7531     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
7532 
7533     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
7534     GFI->addStackRoot(FI->getIndex(), TypeMap);
7535     return;
7536   }
7537   case Intrinsic::gcread:
7538   case Intrinsic::gcwrite:
7539     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
7540   case Intrinsic::get_rounding:
7541     Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
7542     setValue(&I, Res);
7543     DAG.setRoot(Res.getValue(1));
7544     return;
7545 
7546   case Intrinsic::expect:
7547   case Intrinsic::expect_with_probability:
7548     // Just replace __builtin_expect(exp, c) and
7549     // __builtin_expect_with_probability(exp, c, p) with EXP.
7550     setValue(&I, getValue(I.getArgOperand(0)));
7551     return;
7552 
7553   case Intrinsic::ubsantrap:
7554   case Intrinsic::debugtrap:
7555   case Intrinsic::trap: {
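         // Illustrative note (not from this file): if the call site carries an
         // attribute such as "trap-func-name"="my_trap_handler" (a hypothetical
         // name), the trap is lowered below as an ordinary call to that external
         // symbol rather than as an ISD::TRAP-style node; for llvm.ubsantrap the
         // check kind is forwarded as the call's single argument.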
7556     StringRef TrapFuncName =
7557         I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
7558     if (TrapFuncName.empty()) {
7559       switch (Intrinsic) {
7560       case Intrinsic::trap:
7561         DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
7562         break;
7563       case Intrinsic::debugtrap:
7564         DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
7565         break;
7566       case Intrinsic::ubsantrap:
7567         DAG.setRoot(DAG.getNode(
7568             ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
7569             DAG.getTargetConstant(
7570                 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7571                 MVT::i32)));
7572         break;
7573       default: llvm_unreachable("unknown trap intrinsic");
7574       }
7575       DAG.addNoMergeSiteInfo(DAG.getRoot().getNode(),
7576                              I.hasFnAttr(Attribute::NoMerge));
7577       return;
7578     }
7579     TargetLowering::ArgListTy Args;
7580     if (Intrinsic == Intrinsic::ubsantrap) {
7581       Args.push_back(TargetLoweringBase::ArgListEntry());
7582       Args[0].Val = I.getArgOperand(0);
7583       Args[0].Node = getValue(Args[0].Val);
7584       Args[0].Ty = Args[0].Val->getType();
7585     }
7586 
7587     TargetLowering::CallLoweringInfo CLI(DAG);
7588     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
7589         CallingConv::C, I.getType(),
7590         DAG.getExternalSymbol(TrapFuncName.data(),
7591                               TLI.getPointerTy(DAG.getDataLayout())),
7592         std::move(Args));
7593     CLI.NoMerge = I.hasFnAttr(Attribute::NoMerge);
7594     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7595     DAG.setRoot(Result.second);
7596     return;
7597   }
7598 
7599   case Intrinsic::allow_runtime_check:
7600   case Intrinsic::allow_ubsan_check:
7601     setValue(&I, getValue(ConstantInt::getTrue(I.getType())));
7602     return;
7603 
7604   case Intrinsic::uadd_with_overflow:
7605   case Intrinsic::sadd_with_overflow:
7606   case Intrinsic::usub_with_overflow:
7607   case Intrinsic::ssub_with_overflow:
7608   case Intrinsic::umul_with_overflow:
7609   case Intrinsic::smul_with_overflow: {
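         // Illustrative IR (not from this file):
         //   %s = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
         // is selected as a single ISD::UADDO node with value types {i32, i1};
         // the second result is the overflow flag. Vector variants use an
         // overflow type of <N x i1> with the same element count as the result.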
7610     ISD::NodeType Op;
7611     switch (Intrinsic) {
7612     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7613     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
7614     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
7615     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
7616     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
7617     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
7618     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
7619     }
7620     SDValue Op1 = getValue(I.getArgOperand(0));
7621     SDValue Op2 = getValue(I.getArgOperand(1));
7622 
7623     EVT ResultVT = Op1.getValueType();
7624     EVT OverflowVT = MVT::i1;
7625     if (ResultVT.isVector())
7626       OverflowVT = EVT::getVectorVT(
7627           *Context, OverflowVT, ResultVT.getVectorElementCount());
7628 
7629     SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
7630     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
7631     return;
7632   }
7633   case Intrinsic::prefetch: {
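         // The arguments follow the llvm.prefetch signature
         //   (ptr %addr, i32 %rw, i32 %locality, i32 %cachetype),
         // where rw == 0 requests a read prefetch and rw == 1 a write prefetch;
         // that flag decides below whether the memory operand is a load or store.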
7634     SDValue Ops[5];
7635     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7636     auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
7637     Ops[0] = DAG.getRoot();
7638     Ops[1] = getValue(I.getArgOperand(0));
7639     Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
7640                                    MVT::i32);
7641     Ops[3] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(2)), sdl,
7642                                    MVT::i32);
7643     Ops[4] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(3)), sdl,
7644                                    MVT::i32);
7645     SDValue Result = DAG.getMemIntrinsicNode(
7646         ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
7647         EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
7648         /* align */ std::nullopt, Flags);
7649 
7650     // Chain the prefetch in parallel with any pending loads, to stay out of
7651     // the way of later optimizations.
7652     PendingLoads.push_back(Result);
7653     Result = getRoot();
7654     DAG.setRoot(Result);
7655     return;
7656   }
7657   case Intrinsic::lifetime_start:
7658   case Intrinsic::lifetime_end: {
7659     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
7660     // Stack coloring is not enabled in O0, discard region information.
7661     if (TM.getOptLevel() == CodeGenOptLevel::None)
7662       return;
7663 
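         // Each lifetime marker on a static alloca becomes a LIFETIME_START /
         // LIFETIME_END node carrying the alloca's frame index, the object size,
         // and, when it can be computed, the constant offset into the alloca.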
7664     const int64_t ObjectSize =
7665         cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
7666     Value *const ObjectPtr = I.getArgOperand(1);
7667     SmallVector<const Value *, 4> Allocas;
7668     getUnderlyingObjects(ObjectPtr, Allocas);
7669 
7670     for (const Value *Alloca : Allocas) {
7671       const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7672 
7673       // Could not find an Alloca.
7674       if (!LifetimeObject)
7675         continue;
7676 
7677       // First check that the Alloca is static, otherwise it won't have a
7678       // valid frame index.
7679       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
7680       if (SI == FuncInfo.StaticAllocaMap.end())
7681         return;
7682 
7683       const int FrameIndex = SI->second;
7684       int64_t Offset;
7685       if (GetPointerBaseWithConstantOffset(
7686               ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
7687         Offset = -1; // Cannot determine offset from alloca to lifetime object.
7688       Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
7689                                 Offset);
7690       DAG.setRoot(Res);
7691     }
7692     return;
7693   }
7694   case Intrinsic::pseudoprobe: {
7695     auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7696     auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7697     auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7698     Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
7699     DAG.setRoot(Res);
7700     return;
7701   }
7702   case Intrinsic::invariant_start:
7703     // Discard region information.
7704     setValue(&I,
7705              DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType())));
7706     return;
7707   case Intrinsic::invariant_end:
7708     // Discard region information.
7709     return;
7710   case Intrinsic::clear_cache: {
7711     SDValue InputChain = DAG.getRoot();
7712     SDValue StartVal = getValue(I.getArgOperand(0));
7713     SDValue EndVal = getValue(I.getArgOperand(1));
7714     Res = DAG.getNode(ISD::CLEAR_CACHE, sdl, DAG.getVTList(MVT::Other),
7715                       {InputChain, StartVal, EndVal});
7716     setValue(&I, Res);
7717     DAG.setRoot(Res);
7718     return;
7719   }
7720   case Intrinsic::donothing:
7721   case Intrinsic::seh_try_begin:
7722   case Intrinsic::seh_scope_begin:
7723   case Intrinsic::seh_try_end:
7724   case Intrinsic::seh_scope_end:
7725     // ignore
7726     return;
7727   case Intrinsic::experimental_stackmap:
7728     visitStackmap(I);
7729     return;
7730   case Intrinsic::experimental_patchpoint_void:
7731   case Intrinsic::experimental_patchpoint:
7732     visitPatchpoint(I);
7733     return;
7734   case Intrinsic::experimental_gc_statepoint:
7735     LowerStatepoint(cast<GCStatepointInst>(I));
7736     return;
7737   case Intrinsic::experimental_gc_result:
7738     visitGCResult(cast<GCResultInst>(I));
7739     return;
7740   case Intrinsic::experimental_gc_relocate:
7741     visitGCRelocate(cast<GCRelocateInst>(I));
7742     return;
7743   case Intrinsic::instrprof_cover:
7744     llvm_unreachable("instrprof failed to lower a cover");
7745   case Intrinsic::instrprof_increment:
7746     llvm_unreachable("instrprof failed to lower an increment");
7747   case Intrinsic::instrprof_timestamp:
7748     llvm_unreachable("instrprof failed to lower a timestamp");
7749   case Intrinsic::instrprof_value_profile:
7750     llvm_unreachable("instrprof failed to lower a value profiling call");
7751   case Intrinsic::instrprof_mcdc_parameters:
7752     llvm_unreachable("instrprof failed to lower mcdc parameters");
7753   case Intrinsic::instrprof_mcdc_tvbitmap_update:
7754     llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update");
7755   case Intrinsic::localescape: {
7756     MachineFunction &MF = DAG.getMachineFunction();
7757     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
7758 
7759     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
7760     // is the same on all targets.
7761     for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) {
7762       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7763       if (isa<ConstantPointerNull>(Arg))
7764         continue; // Skip null pointers. They represent a hole in index space.
7765       AllocaInst *Slot = cast<AllocaInst>(Arg);
7766       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
7767              "can only escape static allocas");
7768       int FI = FuncInfo.StaticAllocaMap[Slot];
7769       MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol(
7770           GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
7771       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
7772               TII->get(TargetOpcode::LOCAL_ESCAPE))
7773           .addSym(FrameAllocSym)
7774           .addFrameIndex(FI);
7775     }
7776 
7777     return;
7778   }
7779 
7780   case Intrinsic::localrecover: {
7781     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
7782     MachineFunction &MF = DAG.getMachineFunction();
7783 
7784     // Get the symbol that defines the frame offset.
7785     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7786     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
7787     unsigned IdxVal =
7788         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7789     MCSymbol *FrameAllocSym = MF.getContext().getOrCreateFrameAllocSymbol(
7790         GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
7791 
7792     Value *FP = I.getArgOperand(1);
7793     SDValue FPVal = getValue(FP);
7794     EVT PtrVT = FPVal.getValueType();
7795 
7796     // Create a MCSymbol for the label to avoid any target lowering
7797     // that would make this PC relative.
7798     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
7799     SDValue OffsetVal =
7800         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
7801 
7802     // Add the offset to the FP.
7803     SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7804     setValue(&I, Add);
7805 
7806     return;
7807   }
7808 
7809   case Intrinsic::fake_use: {
7810     Value *V = I.getArgOperand(0);
7811     SDValue Ops[2];
7812     // For Values not declared or previously used in this basic block, the
7813     // NodeMap will not have an entry, and `getValue` will assert if V has no
7814     // valid register value.
7815     auto FakeUseValue = [&]() -> SDValue {
7816       SDValue &N = NodeMap[V];
7817       if (N.getNode())
7818         return N;
7819 
7820       // If there's a virtual register allocated and initialized for this
7821       // value, use it.
7822       if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
7823         return copyFromReg;
7824       // FIXME: Do we want to preserve constants? It seems pointless.
7825       if (isa<Constant>(V))
7826         return getValue(V);
7827       return SDValue();
7828     }();
7829     if (!FakeUseValue || FakeUseValue.isUndef())
7830       return;
7831     Ops[0] = getRoot();
7832     Ops[1] = FakeUseValue;
7833     // Also, do not translate a fake use with an undef operand, or any other
7834     // empty SDValues.
7835     if (!Ops[1] || Ops[1].isUndef())
7836       return;
7837     DAG.setRoot(DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other, Ops));
7838     return;
7839   }
7840 
7841   case Intrinsic::eh_exceptionpointer:
7842   case Intrinsic::eh_exceptioncode: {
7843     // Get the exception pointer vreg, copy from it, and resize it to fit.
7844     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
7845     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
7846     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
7847     Register VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
7848     SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT);
7849     if (Intrinsic == Intrinsic::eh_exceptioncode)
7850       N = DAG.getZExtOrTrunc(N, sdl, MVT::i32);
7851     setValue(&I, N);
7852     return;
7853   }
7854   case Intrinsic::xray_customevent: {
7855     // Here we want to make sure that the intrinsic behaves as if it has a
7856     // specific calling convention.
7857     const auto &Triple = DAG.getTarget().getTargetTriple();
7858     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7859       return;
7860 
7861     SmallVector<SDValue, 8> Ops;
7862 
7863     // We want to say that we always want the arguments in registers.
7864     SDValue LogEntryVal = getValue(I.getArgOperand(0));
7865     SDValue StrSizeVal = getValue(I.getArgOperand(1));
7866     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7867     SDValue Chain = getRoot();
7868     Ops.push_back(LogEntryVal);
7869     Ops.push_back(StrSizeVal);
7870     Ops.push_back(Chain);
7871 
7872     // We need to enforce the calling convention for the callsite so that
7873     // argument ordering is handled correctly, and so that register allocation
7874     // can see which registers the call may clobber and preserve live values
7875     // across calls to the intrinsic.
7876     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7877                                            sdl, NodeTys, Ops);
7878     SDValue patchableNode = SDValue(MN, 0);
7879     DAG.setRoot(patchableNode);
7880     setValue(&I, patchableNode);
7881     return;
7882   }
7883   case Intrinsic::xray_typedevent: {
7884     // Here we want to make sure that the intrinsic behaves as if it has a
7885     // specific calling convention.
7886     const auto &Triple = DAG.getTarget().getTargetTriple();
7887     if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7888       return;
7889 
7890     SmallVector<SDValue, 8> Ops;
7891 
7892     // We want to say that we always want the arguments in registers.
7893     // It's unclear to me how manipulating the selection DAG here forces callers
7894     // to provide arguments in registers instead of on the stack.
7895     SDValue LogTypeId = getValue(I.getArgOperand(0));
7896     SDValue LogEntryVal = getValue(I.getArgOperand(1));
7897     SDValue StrSizeVal = getValue(I.getArgOperand(2));
7898     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7899     SDValue Chain = getRoot();
7900     Ops.push_back(LogTypeId);
7901     Ops.push_back(LogEntryVal);
7902     Ops.push_back(StrSizeVal);
7903     Ops.push_back(Chain);
7904 
7905     // We need to enforce the calling convention for the callsite so that
7906     // argument ordering is handled correctly, and so that register allocation
7907     // can see which registers the call may clobber and preserve live values
7908     // across calls to the intrinsic.
7909     MachineSDNode *MN = DAG.getMachineNode(
7910         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7911     SDValue patchableNode = SDValue(MN, 0);
7912     DAG.setRoot(patchableNode);
7913     setValue(&I, patchableNode);
7914     return;
7915   }
7916   case Intrinsic::experimental_deoptimize:
7917     LowerDeoptimizeCall(&I);
7918     return;
7919   case Intrinsic::stepvector:
7920     visitStepVector(I);
7921     return;
7922   case Intrinsic::vector_reduce_fadd:
7923   case Intrinsic::vector_reduce_fmul:
7924   case Intrinsic::vector_reduce_add:
7925   case Intrinsic::vector_reduce_mul:
7926   case Intrinsic::vector_reduce_and:
7927   case Intrinsic::vector_reduce_or:
7928   case Intrinsic::vector_reduce_xor:
7929   case Intrinsic::vector_reduce_smax:
7930   case Intrinsic::vector_reduce_smin:
7931   case Intrinsic::vector_reduce_umax:
7932   case Intrinsic::vector_reduce_umin:
7933   case Intrinsic::vector_reduce_fmax:
7934   case Intrinsic::vector_reduce_fmin:
7935   case Intrinsic::vector_reduce_fmaximum:
7936   case Intrinsic::vector_reduce_fminimum:
7937     visitVectorReduce(I, Intrinsic);
7938     return;
7939 
7940   case Intrinsic::icall_branch_funnel: {
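         // A sketch of the operand list built below for ICALL_BRANCH_FUNNEL: the
         // first intrinsic argument, the common GlobalValue base, the
         // <offset, target> pairs sorted by offset, and the chain last. Every
         // target must be a constant offset from the same GlobalValue.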
7941     SmallVector<SDValue, 16> Ops;
7942     Ops.push_back(getValue(I.getArgOperand(0)));
7943 
7944     int64_t Offset;
7945     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7946         I.getArgOperand(1), Offset, DAG.getDataLayout()));
7947     if (!Base)
7948       report_fatal_error(
7949           "llvm.icall.branch.funnel operand must be a GlobalValue");
7950     Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0));
7951 
7952     struct BranchFunnelTarget {
7953       int64_t Offset;
7954       SDValue Target;
7955     };
7956     SmallVector<BranchFunnelTarget, 8> Targets;
7957 
7958     for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) {
7959       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7960           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
7961       if (ElemBase != Base)
7962         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
7963                            "to the same GlobalValue");
7964 
7965       SDValue Val = getValue(I.getArgOperand(Op + 1));
7966       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7967       if (!GA)
7968         report_fatal_error(
7969             "llvm.icall.branch.funnel operand must be a GlobalValue");
7970       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
7971                                      GA->getGlobal(), sdl, Val.getValueType(),
7972                                      GA->getOffset())});
7973     }
7974     llvm::sort(Targets,
7975                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
7976                  return T1.Offset < T2.Offset;
7977                });
7978 
7979     for (auto &T : Targets) {
7980       Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32));
7981       Ops.push_back(T.Target);
7982     }
7983 
7984     Ops.push_back(DAG.getRoot()); // Chain
7985     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7986                                  MVT::Other, Ops),
7987               0);
7988     DAG.setRoot(N);
7989     setValue(&I, N);
7990     HasTailCall = true;
7991     return;
7992   }
7993 
7994   case Intrinsic::wasm_landingpad_index:
7995     // Information this intrinsic contained has been transferred to
7996     // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
7997     // delete it now.
7998     return;
7999 
8000   case Intrinsic::aarch64_settag:
8001   case Intrinsic::aarch64_settag_zero: {
8002     const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8003     bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
8004     SDValue Val = TSI.EmitTargetCodeForSetTag(
8005         DAG, sdl, getRoot(), getValue(I.getArgOperand(0)),
8006         getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
8007         ZeroMemory);
8008     DAG.setRoot(Val);
8009     setValue(&I, Val);
8010     return;
8011   }
8012   case Intrinsic::amdgcn_cs_chain: {
8013     assert(I.arg_size() == 5 && "Additional args not supported yet");
8014     assert(cast<ConstantInt>(I.getOperand(4))->isZero() &&
8015            "Non-zero flags not supported yet");
8016 
8017     // At this point we don't care if it's amdgpu_cs_chain or
8018     // amdgpu_cs_chain_preserve.
8019     CallingConv::ID CC = CallingConv::AMDGPU_CS_Chain;
8020 
8021     Type *RetTy = I.getType();
8022     assert(RetTy->isVoidTy() && "Should not return");
8023 
8024     SDValue Callee = getValue(I.getOperand(0));
8025 
8026     // We only have 2 actual args: one for the SGPRs and one for the VGPRs.
8027     // We'll also tack the value of the EXEC mask at the end.
8028     TargetLowering::ArgListTy Args;
8029     Args.reserve(3);
8030 
8031     for (unsigned Idx : {2, 3, 1}) {
8032       TargetLowering::ArgListEntry Arg;
8033       Arg.Node = getValue(I.getOperand(Idx));
8034       Arg.Ty = I.getOperand(Idx)->getType();
8035       Arg.setAttributes(&I, Idx);
8036       Args.push_back(Arg);
8037     }
8038 
8039     assert(Args[0].IsInReg && "SGPR args should be marked inreg");
8040     assert(!Args[1].IsInReg && "VGPR args should not be marked inreg");
8041     Args[2].IsInReg = true; // EXEC should be inreg
8042 
8043     TargetLowering::CallLoweringInfo CLI(DAG);
8044     CLI.setDebugLoc(getCurSDLoc())
8045         .setChain(getRoot())
8046         .setCallee(CC, RetTy, Callee, std::move(Args))
8047         .setNoReturn(true)
8048         .setTailCall(true)
8049         .setConvergent(I.isConvergent());
8050     CLI.CB = &I;
8051     std::pair<SDValue, SDValue> Result =
8052         lowerInvokable(CLI, /*EHPadBB*/ nullptr);
8053     (void)Result;
8054     assert(!Result.first.getNode() && !Result.second.getNode() &&
8055            "Should've lowered as tail call");
8056 
8057     HasTailCall = true;
8058     return;
8059   }
8060   case Intrinsic::ptrmask: {
8061     SDValue Ptr = getValue(I.getOperand(0));
8062     SDValue Mask = getValue(I.getOperand(1));
8063 
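         // Illustrative lowering (not from this file):
         //   %q = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 %m)
         // becomes an ISD::AND of the pointer with the (possibly extended) mask.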
8064     // On arm64_32, pointers are 32 bits when stored in memory, but
8065     // zero-extended to 64 bits when in registers.  Thus the mask is 32 bits to
8066     // match the index type, but the pointer is 64 bits, so the mask must be
8067     // zero-extended up to 64 bits to match the pointer.
8068     EVT PtrVT =
8069         TLI.getValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
8070     EVT MemVT =
8071         TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
8072     assert(PtrVT == Ptr.getValueType());
8073     assert(MemVT == Mask.getValueType());
8074     if (MemVT != PtrVT)
8075       Mask = DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8076 
8077     setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr, Mask));
8078     return;
8079   }
8080   case Intrinsic::threadlocal_address: {
8081     setValue(&I, getValue(I.getOperand(0)));
8082     return;
8083   }
8084   case Intrinsic::get_active_lane_mask: {
8085     EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8086     SDValue Index = getValue(I.getOperand(0));
8087     EVT ElementVT = Index.getValueType();
8088 
8089     if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) {
8090       visitTargetIntrinsic(I, Intrinsic);
8091       return;
8092     }
8093 
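         // A sketch of the generic expansion built below:
         //   lanemask[i] = ((Index +sat i) < TripCount)   // unsigned compare
         // i.e. splat the base index, add a saturating step vector, and compare
         // element-wise against the splatted trip count.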
8094     SDValue TripCount = getValue(I.getOperand(1));
8095     EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT,
8096                                  CCVT.getVectorElementCount());
8097 
8098     SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index);
8099     SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount);
8100     SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
8101     SDValue VectorInduction = DAG.getNode(
8102         ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep);
8103     SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction,
8104                                  VectorTripCount, ISD::CondCode::SETULT);
8105     setValue(&I, SetCC);
8106     return;
8107   }
8108   case Intrinsic::experimental_get_vector_length: {
8109     assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
8110            "Expected positive VF");
8111     unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
8112     bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
8113 
8114     SDValue Count = getValue(I.getOperand(0));
8115     EVT CountVT = Count.getValueType();
8116 
8117     if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) {
8118       visitTargetIntrinsic(I, Intrinsic);
8119       return;
8120     }
8121 
8122     // Expand to a umin between the trip count and the maximum elements the type
8123     // can hold.
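         // For example (illustrative): with VF == 4 and a scalable request, the
         // result is umin(Count, 4 * vscale), truncated back to the result type.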
8124     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8125 
8126     // Extend the trip count to at least the result VT.
8127     if (CountVT.bitsLT(VT)) {
8128       Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count);
8129       CountVT = VT;
8130     }
8131 
8132     SDValue MaxEVL = DAG.getElementCount(sdl, CountVT,
8133                                          ElementCount::get(VF, IsScalable));
8134 
8135     SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL);
8136     // Clip to the result type if needed.
8137     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin);
8138 
8139     setValue(&I, Trunc);
8140     return;
8141   }
8142   case Intrinsic::experimental_vector_partial_reduce_add: {
8143 
8144     if (!TLI.shouldExpandPartialReductionIntrinsic(cast<IntrinsicInst>(&I))) {
8145       visitTargetIntrinsic(I, Intrinsic);
8146       return;
8147     }
8148 
8149     setValue(&I, DAG.getPartialReduceAdd(sdl, EVT::getEVT(I.getType()),
8150                                          getValue(I.getOperand(0)),
8151                                          getValue(I.getOperand(1))));
8152     return;
8153   }
8154   case Intrinsic::experimental_cttz_elts: {
8155     auto DL = getCurSDLoc();
8156     SDValue Op = getValue(I.getOperand(0));
8157     EVT OpVT = Op.getValueType();
8158 
8159     if (!TLI.shouldExpandCttzElements(OpVT)) {
8160       visitTargetIntrinsic(I, Intrinsic);
8161       return;
8162     }
8163 
8164     if (OpVT.getScalarType() != MVT::i1) {
8165       // Compare the input vector elements to zero to form the i1 mask to count.
8166       SDValue AllZero = DAG.getConstant(0, DL, OpVT);
8167       OpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
8168                               OpVT.getVectorElementCount());
8169       Op = DAG.getSetCC(DL, OpVT, Op, AllZero, ISD::SETNE);
8170     }
8171 
8172     // If the zero-is-poison flag is set, we can assume the upper limit
8173     // of the result is VF-1.
8174     bool ZeroIsPoison =
8175         !cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero();
8176     ConstantRange VScaleRange(1, true); // Dummy value.
8177     if (isa<ScalableVectorType>(I.getOperand(0)->getType()))
8178       VScaleRange = getVScaleRange(I.getCaller(), 64);
8179     unsigned EltWidth = TLI.getBitWidthForCttzElements(
8180         I.getType(), OpVT.getVectorElementCount(), ZeroIsPoison, &VScaleRange);
8181 
8182     MVT NewEltTy = MVT::getIntegerVT(EltWidth);
8183 
8184     // Create the new vector type & get the vector length
8185     EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltTy,
8186                                  OpVT.getVectorElementCount());
8187 
8188     SDValue VL =
8189         DAG.getElementCount(DL, NewEltTy, OpVT.getVectorElementCount());
8190 
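         // A sketch of the arithmetic below, with lane index i and vector length VL:
         //   And[i] = Op[i] ? (VL - i) : 0   // step vector masked by the input
         //   Max    = umax_i And[i]          // VL minus the first set lane, if any
         //   Result = VL - Max               // cttz, or VL when no lane is set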
8191     SDValue StepVec = DAG.getStepVector(DL, NewVT);
8192     SDValue SplatVL = DAG.getSplat(NewVT, DL, VL);
8193     SDValue StepVL = DAG.getNode(ISD::SUB, DL, NewVT, SplatVL, StepVec);
8194     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, Op);
8195     SDValue And = DAG.getNode(ISD::AND, DL, NewVT, StepVL, Ext);
8196     SDValue Max = DAG.getNode(ISD::VECREDUCE_UMAX, DL, NewEltTy, And);
8197     SDValue Sub = DAG.getNode(ISD::SUB, DL, NewEltTy, VL, Max);
8198 
8199     EVT RetTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
8200     SDValue Ret = DAG.getZExtOrTrunc(Sub, DL, RetTy);
8201 
8202     setValue(&I, Ret);
8203     return;
8204   }
8205   case Intrinsic::vector_insert: {
8206     SDValue Vec = getValue(I.getOperand(0));
8207     SDValue SubVec = getValue(I.getOperand(1));
8208     SDValue Index = getValue(I.getOperand(2));
8209 
8210     // The intrinsic's index type is i64, but the SDNode requires an index type
8211     // suitable for the target. Convert the index as required.
8212     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
8213     if (Index.getValueType() != VectorIdxTy)
8214       Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8215 
8216     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8217     setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
8218                              Index));
8219     return;
8220   }
8221   case Intrinsic::vector_extract: {
8222     SDValue Vec = getValue(I.getOperand(0));
8223     SDValue Index = getValue(I.getOperand(1));
8224     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8225 
8226     // The intrinsic's index type is i64, but the SDNode requires an index type
8227     // suitable for the target. Convert the index as required.
8228     MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
8229     if (Index.getValueType() != VectorIdxTy)
8230       Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8231 
8232     setValue(&I,
8233              DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
8234     return;
8235   }
8236   case Intrinsic::experimental_vector_match: {
8237     SDValue Op1 = getValue(I.getOperand(0));
8238     SDValue Op2 = getValue(I.getOperand(1));
8239     SDValue Mask = getValue(I.getOperand(2));
8240     EVT Op1VT = Op1.getValueType();
8241     EVT Op2VT = Op2.getValueType();
8242     EVT ResVT = Mask.getValueType();
8243     unsigned SearchSize = Op2VT.getVectorNumElements();
8244 
8245     // If the target has native support for this vector match operation, lower
8246     // the intrinsic untouched; otherwise, expand it below.
8247     if (!TLI.shouldExpandVectorMatch(Op1VT, SearchSize)) {
8248       visitTargetIntrinsic(I, Intrinsic);
8249       return;
8250     }
8251 
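         // Generic expansion (a sketch): splat each element of the search vector,
         // compare it against Op1, OR the per-element results together, then AND
         // with the input mask:
         //   Ret = Mask & (OR_i (Op1 == splat(Op2[i])))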
8252     SDValue Ret = DAG.getConstant(0, sdl, ResVT);
8253 
8254     for (unsigned i = 0; i < SearchSize; ++i) {
8255       SDValue Op2Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, sdl,
8256                                     Op2VT.getVectorElementType(), Op2,
8257                                     DAG.getVectorIdxConstant(i, sdl));
8258       SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, sdl, Op1VT, Op2Elem);
8259       SDValue Cmp = DAG.getSetCC(sdl, ResVT, Op1, Splat, ISD::SETEQ);
8260       Ret = DAG.getNode(ISD::OR, sdl, ResVT, Ret, Cmp);
8261     }
8262 
8263     setValue(&I, DAG.getNode(ISD::AND, sdl, ResVT, Ret, Mask));
8264     return;
8265   }
8266   case Intrinsic::vector_reverse:
8267     visitVectorReverse(I);
8268     return;
8269   case Intrinsic::vector_splice:
8270     visitVectorSplice(I);
8271     return;
8272   case Intrinsic::callbr_landingpad:
8273     visitCallBrLandingPad(I);
8274     return;
8275   case Intrinsic::vector_interleave2:
8276     visitVectorInterleave(I);
8277     return;
8278   case Intrinsic::vector_deinterleave2:
8279     visitVectorDeinterleave(I);
8280     return;
8281   case Intrinsic::experimental_vector_compress:
8282     setValue(&I, DAG.getNode(ISD::VECTOR_COMPRESS, sdl,
8283                              getValue(I.getArgOperand(0)).getValueType(),
8284                              getValue(I.getArgOperand(0)),
8285                              getValue(I.getArgOperand(1)),
8286                              getValue(I.getArgOperand(2)), Flags));
8287     return;
8288   case Intrinsic::experimental_convergence_anchor:
8289   case Intrinsic::experimental_convergence_entry:
8290   case Intrinsic::experimental_convergence_loop:
8291     visitConvergenceControl(I, Intrinsic);
8292     return;
8293   case Intrinsic::experimental_vector_histogram_add: {
8294     visitVectorHistogram(I, Intrinsic);
8295     return;
8296   }
8297   case Intrinsic::experimental_vector_extract_last_active: {
8298     visitVectorExtractLastActive(I, Intrinsic);
8299     return;
8300   }
8301   }
8302 }
8303 
8304 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8305     const ConstrainedFPIntrinsic &FPI) {
8306   SDLoc sdl = getCurSDLoc();
8307 
8308   // We do not need to serialize constrained FP intrinsics against
8309   // each other or against (nonvolatile) loads, so they can be
8310   // chained like loads.
8311   SDValue Chain = DAG.getRoot();
8312   SmallVector<SDValue, 4> Opers;
8313   Opers.push_back(Chain);
8314   for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
8315     Opers.push_back(getValue(FPI.getArgOperand(I)));
8316 
8317   auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
8318     assert(Result.getNode()->getNumValues() == 2);
8319 
8320     // Push node to the appropriate list so that future instructions can be
8321     // chained up correctly.
8322     SDValue OutChain = Result.getValue(1);
8323     switch (EB) {
8324     case fp::ExceptionBehavior::ebIgnore:
8325       // The only reason why ebIgnore nodes still need to be chained is that
8326       // they might depend on the current rounding mode, and therefore must
8327       // not be moved across instruction that may change that mode.
8328       [[fallthrough]];
8329     case fp::ExceptionBehavior::ebMayTrap:
8330       // These must not be moved across calls or instructions that may change
8331       // floating-point exception masks.
8332       PendingConstrainedFP.push_back(OutChain);
8333       break;
8334     case fp::ExceptionBehavior::ebStrict:
8335       // These must not be moved across calls or instructions that may change
8336       // floating-point exception masks or read floating-point exception flags.
8337       // In addition, they cannot be optimized out even if unused.
8338       PendingConstrainedFPStrict.push_back(OutChain);
8339       break;
8340     }
8341   };
8342 
8343   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8344   EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
8345   SDVTList VTs = DAG.getVTList(VT, MVT::Other);
8346   fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
8347 
8348   SDNodeFlags Flags;
8349   if (EB == fp::ExceptionBehavior::ebIgnore)
8350     Flags.setNoFPExcept(true);
8351 
8352   if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
8353     Flags.copyFMF(*FPOp);
8354 
8355   unsigned Opcode;
8356   switch (FPI.getIntrinsicID()) {
8357   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
8358 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
8359   case Intrinsic::INTRINSIC:                                                   \
8360     Opcode = ISD::STRICT_##DAGN;                                               \
8361     break;
8362 #include "llvm/IR/ConstrainedOps.def"
8363   case Intrinsic::experimental_constrained_fmuladd: {
8364     Opcode = ISD::STRICT_FMA;
8365     // Break fmuladd into fmul and fadd.
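         // That is (illustrative): when fusion is disallowed or unprofitable,
         //   fmuladd(a, b, c) -> fadd(fmul(a, b), c)
         // using the strict variants and threading the fmul's output chain.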
8366     if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
8367         !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
8368       Opers.pop_back();
8369       SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
8370       pushOutChain(Mul, EB);
8371       Opcode = ISD::STRICT_FADD;
8372       Opers.clear();
8373       Opers.push_back(Mul.getValue(1));
8374       Opers.push_back(Mul.getValue(0));
8375       Opers.push_back(getValue(FPI.getArgOperand(2)));
8376     }
8377     break;
8378   }
8379   }
8380 
8381   // A few strict DAG nodes carry additional operands that are not
8382   // set up by the default code above.
8383   switch (Opcode) {
8384   default: break;
8385   case ISD::STRICT_FP_ROUND:
8386     Opers.push_back(
8387         DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
8388     break;
8389   case ISD::STRICT_FSETCC:
8390   case ISD::STRICT_FSETCCS: {
8391     auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
8392     ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
8393     if (TM.Options.NoNaNsFPMath)
8394       Condition = getFCmpCodeWithoutNaN(Condition);
8395     Opers.push_back(DAG.getCondCode(Condition));
8396     break;
8397   }
8398   }
8399 
8400   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
8401   pushOutChain(Result, EB);
8402 
8403   SDValue FPResult = Result.getValue(0);
8404   setValue(&FPI, FPResult);
8405 }
8406 
8407 static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
8408   std::optional<unsigned> ResOPC;
8409   switch (VPIntrin.getIntrinsicID()) {
8410   case Intrinsic::vp_ctlz: {
8411     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8412     ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8413     break;
8414   }
8415   case Intrinsic::vp_cttz: {
8416     bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8417     ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8418     break;
8419   }
8420   case Intrinsic::vp_cttz_elts: {
8421     bool IsZeroPoison = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8422     ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8423     break;
8424   }
8425 #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)                                    \
8426   case Intrinsic::VPID:                                                        \
8427     ResOPC = ISD::VPSD;                                                        \
8428     break;
8429 #include "llvm/IR/VPIntrinsics.def"
8430   }
8431 
8432   if (!ResOPC)
8433     llvm_unreachable(
8434         "Inconsistency: no SDNode available for this VPIntrinsic!");
8435 
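       // Sequential (ordered) FP reductions may be relaxed to their unordered
       // counterparts when the call carries the reassoc fast-math flag.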
8436   if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8437       *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8438     if (VPIntrin.getFastMathFlags().allowReassoc())
8439       return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8440                                                 : ISD::VP_REDUCE_FMUL;
8441   }
8442 
8443   return *ResOPC;
8444 }
8445 
8446 void SelectionDAGBuilder::visitVPLoad(
8447     const VPIntrinsic &VPIntrin, EVT VT,
8448     const SmallVectorImpl<SDValue> &OpValues) {
8449   SDLoc DL = getCurSDLoc();
8450   Value *PtrOperand = VPIntrin.getArgOperand(0);
8451   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8452   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8453   const MDNode *Ranges = getRangeMetadata(VPIntrin);
8454   SDValue LD;
8455   // Do not serialize variable-length loads of constant memory with
8456   // anything.
8457   if (!Alignment)
8458     Alignment = DAG.getEVTAlign(VT);
8459   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8460   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
8461   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8462   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8463       MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
8464       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8465   LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8466                      MMO, false /*IsExpanding */);
8467   if (AddToChain)
8468     PendingLoads.push_back(LD.getValue(1));
8469   setValue(&VPIntrin, LD);
8470 }
8471 
8472 void SelectionDAGBuilder::visitVPGather(
8473     const VPIntrinsic &VPIntrin, EVT VT,
8474     const SmallVectorImpl<SDValue> &OpValues) {
8475   SDLoc DL = getCurSDLoc();
8476   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8477   Value *PtrOperand = VPIntrin.getArgOperand(0);
8478   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8479   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8480   const MDNode *Ranges = getRangeMetadata(VPIntrin);
8481   SDValue LD;
8482   if (!Alignment)
8483     Alignment = DAG.getEVTAlign(VT.getScalarType());
8484   unsigned AS =
8485     PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8486   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8487       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
8488       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8489   SDValue Base, Index, Scale;
8490   ISD::MemIndexType IndexType;
8491   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
8492                                     this, VPIntrin.getParent(),
8493                                     VT.getScalarStoreSize());
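       // If no uniform base can be split off, gather directly from the vector of
       // pointers: base 0, the pointers themselves as indices, and a scale of 1.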
8494   if (!UniformBase) {
8495     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
8496     Index = getValue(PtrOperand);
8497     IndexType = ISD::SIGNED_SCALED;
8498     Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
8499   }
8500   EVT IdxVT = Index.getValueType();
8501   EVT EltTy = IdxVT.getVectorElementType();
8502   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
8503     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
8504     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
8505   }
8506   LD = DAG.getGatherVP(
8507       DAG.getVTList(VT, MVT::Other), VT, DL,
8508       {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8509       IndexType);
8510   PendingLoads.push_back(LD.getValue(1));
8511   setValue(&VPIntrin, LD);
8512 }
8513 
8514 void SelectionDAGBuilder::visitVPStore(
8515     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8516   SDLoc DL = getCurSDLoc();
8517   Value *PtrOperand = VPIntrin.getArgOperand(1);
8518   EVT VT = OpValues[0].getValueType();
8519   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8520   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8521   SDValue ST;
8522   if (!Alignment)
8523     Alignment = DAG.getEVTAlign(VT);
8524   SDValue Ptr = OpValues[1];
8525   SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
8526   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8527       MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
8528       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8529   ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
8530                       OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
8531                       /* IsTruncating */ false, /*IsCompressing*/ false);
8532   DAG.setRoot(ST);
8533   setValue(&VPIntrin, ST);
8534 }
8535 
8536 void SelectionDAGBuilder::visitVPScatter(
8537     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8538   SDLoc DL = getCurSDLoc();
8539   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8540   Value *PtrOperand = VPIntrin.getArgOperand(1);
8541   EVT VT = OpValues[0].getValueType();
8542   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8543   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8544   SDValue ST;
8545   if (!Alignment)
8546     Alignment = DAG.getEVTAlign(VT.getScalarType());
8547   unsigned AS =
8548       PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8549   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8550       MachinePointerInfo(AS), MachineMemOperand::MOStore,
8551       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8552   SDValue Base, Index, Scale;
8553   ISD::MemIndexType IndexType;
8554   bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
8555                                     this, VPIntrin.getParent(),
8556                                     VT.getScalarStoreSize());
8557   if (!UniformBase) {
8558     Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
8559     Index = getValue(PtrOperand);
8560     IndexType = ISD::SIGNED_SCALED;
8561     Scale =
8562       DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
8563   }
8564   EVT IdxVT = Index.getValueType();
8565   EVT EltTy = IdxVT.getVectorElementType();
8566   if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
8567     EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
8568     Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
8569   }
8570   ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
8571                         {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8572                          OpValues[2], OpValues[3]},
8573                         MMO, IndexType);
8574   DAG.setRoot(ST);
8575   setValue(&VPIntrin, ST);
8576 }
8577 
8578 void SelectionDAGBuilder::visitVPStridedLoad(
8579     const VPIntrinsic &VPIntrin, EVT VT,
8580     const SmallVectorImpl<SDValue> &OpValues) {
8581   SDLoc DL = getCurSDLoc();
8582   Value *PtrOperand = VPIntrin.getArgOperand(0);
8583   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8584   if (!Alignment)
8585     Alignment = DAG.getEVTAlign(VT.getScalarType());
8586   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8587   const MDNode *Ranges = getRangeMetadata(VPIntrin);
8588   MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8589   bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
8590   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8591   unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8592   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8593       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
8594       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo, Ranges);
8595 
8596   SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
8597                                     OpValues[2], OpValues[3], MMO,
8598                                     false /*IsExpanding*/);
8599 
8600   if (AddToChain)
8601     PendingLoads.push_back(LD.getValue(1));
8602   setValue(&VPIntrin, LD);
8603 }
8604 
8605 void SelectionDAGBuilder::visitVPStridedStore(
8606     const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8607   SDLoc DL = getCurSDLoc();
8608   Value *PtrOperand = VPIntrin.getArgOperand(1);
8609   EVT VT = OpValues[0].getValueType();
8610   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8611   if (!Alignment)
8612     Alignment = DAG.getEVTAlign(VT.getScalarType());
8613   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8614   unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8615   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8616       MachinePointerInfo(AS), MachineMemOperand::MOStore,
8617       LocationSize::beforeOrAfterPointer(), *Alignment, AAInfo);
8618 
8619   SDValue ST = DAG.getStridedStoreVP(
8620       getMemoryRoot(), DL, OpValues[0], OpValues[1],
8621       DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8622       OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false,
8623       /*IsCompressing*/ false);
8624 
8625   DAG.setRoot(ST);
8626   setValue(&VPIntrin, ST);
8627 }
8628 
8629 void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
8630   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8631   SDLoc DL = getCurSDLoc();
8632 
8633   ISD::CondCode Condition;
8634   CmpInst::Predicate CondCode = VPIntrin.getPredicate();
8635   bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
8636   if (IsFP) {
8637     // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan)
8638     // flags, but calls that don't return floating-point types can't be
8639     // FPMathOperators, like vp.fcmp. This affects constrained fcmp too.
8640     Condition = getFCmpCondCode(CondCode);
8641     if (TM.Options.NoNaNsFPMath)
8642       Condition = getFCmpCodeWithoutNaN(Condition);
8643   } else {
8644     Condition = getICmpCondCode(CondCode);
8645   }
8646 
8647   SDValue Op1 = getValue(VPIntrin.getOperand(0));
8648   SDValue Op2 = getValue(VPIntrin.getOperand(1));
8649   // #2 is the condition code
8650   SDValue MaskOp = getValue(VPIntrin.getOperand(3));
8651   SDValue EVL = getValue(VPIntrin.getOperand(4));
8652   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8653   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8654          "Unexpected target EVL type");
8655   EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
8656 
8657   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8658                                                         VPIntrin.getType());
8659   setValue(&VPIntrin,
8660            DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8661 }
8662 
8663 void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8664     const VPIntrinsic &VPIntrin) {
8665   SDLoc DL = getCurSDLoc();
8666   unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
8667 
8668   auto IID = VPIntrin.getIntrinsicID();
8669 
8670   if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8671     return visitVPCmp(*CmpI);
8672 
8673   SmallVector<EVT, 4> ValueVTs;
8674   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8675   ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
8676   SDVTList VTs = DAG.getVTList(ValueVTs);
8677 
8678   auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);
8679 
8680   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8681   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8682          "Unexpected target EVL type");
8683 
8684   // Request operands.
8685   SmallVector<SDValue, 7> OpValues;
8686   for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) {
8687     auto Op = getValue(VPIntrin.getArgOperand(I));
8688     if (I == EVLParamPos)
8689       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
8690     OpValues.push_back(Op);
8691   }
8692 
8693   switch (Opcode) {
8694   default: {
8695     SDNodeFlags SDFlags;
8696     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8697       SDFlags.copyFMF(*FPMO);
8698     SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags);
8699     setValue(&VPIntrin, Result);
8700     break;
8701   }
8702   case ISD::VP_LOAD:
8703     visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8704     break;
8705   case ISD::VP_GATHER:
8706     visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8707     break;
8708   case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8709     visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8710     break;
8711   case ISD::VP_STORE:
8712     visitVPStore(VPIntrin, OpValues);
8713     break;
8714   case ISD::VP_SCATTER:
8715     visitVPScatter(VPIntrin, OpValues);
8716     break;
8717   case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8718     visitVPStridedStore(VPIntrin, OpValues);
8719     break;
8720   case ISD::VP_FMULADD: {
8721     assert(OpValues.size() == 5 && "Unexpected number of operands");
8722     SDNodeFlags SDFlags;
8723     if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8724       SDFlags.copyFMF(*FPMO);
8725     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
8726         TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) {
8727       setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags));
8728     } else {
8729       SDValue Mul = DAG.getNode(
8730           ISD::VP_FMUL, DL, VTs,
8731           {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8732       SDValue Add =
8733           DAG.getNode(ISD::VP_FADD, DL, VTs,
8734                       {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8735       setValue(&VPIntrin, Add);
8736     }
8737     break;
8738   }
8739   case ISD::VP_IS_FPCLASS: {
8740     const DataLayout DLayout = DAG.getDataLayout();
8741     EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
8742     auto Constant = OpValues[1]->getAsZExtVal();
8743     SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
8744     SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
8745                             {OpValues[0], Check, OpValues[2], OpValues[3]});
8746     setValue(&VPIntrin, V);
8747     return;
8748   }
8749   case ISD::VP_INTTOPTR: {
8750     SDValue N = OpValues[0];
8751     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
8752     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
8753     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8754                                OpValues[2]);
8755     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8756                              OpValues[2]);
8757     setValue(&VPIntrin, N);
8758     break;
8759   }
8760   case ISD::VP_PTRTOINT: {
8761     SDValue N = OpValues[0];
8762     EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8763                                                           VPIntrin.getType());
8764     EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
8765                                        VPIntrin.getOperand(0)->getType());
8766     N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8767                                OpValues[2]);
8768     N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8769                              OpValues[2]);
8770     setValue(&VPIntrin, N);
8771     break;
8772   }
8773   case ISD::VP_ABS:
8774   case ISD::VP_CTLZ:
8775   case ISD::VP_CTLZ_ZERO_UNDEF:
8776   case ISD::VP_CTTZ:
8777   case ISD::VP_CTTZ_ZERO_UNDEF:
8778   case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8779   case ISD::VP_CTTZ_ELTS: {
8780     SDValue Result =
8781         DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8782     setValue(&VPIntrin, Result);
8783     break;
8784   }
8785   }
8786 }
8787 
8788 SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
8789                                           const BasicBlock *EHPadBB,
8790                                           MCSymbol *&BeginLabel) {
8791   MachineFunction &MF = DAG.getMachineFunction();
8792 
8793   // Insert a label before the invoke call to mark the try range.  This can be
8794   // used to detect deletion of the invoke via the MachineModuleInfo.
8795   BeginLabel = MF.getContext().createTempSymbol();
8796 
8797   // For SjLj, keep track of which landing pads go with which invokes
8798   // so as to maintain the ordering of pads in the LSDA.
8799   unsigned CallSiteIndex = FuncInfo.getCurrentCallSite();
8800   if (CallSiteIndex) {
8801     MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
8802     LPadToCallSiteMap[FuncInfo.getMBB(EHPadBB)].push_back(CallSiteIndex);
8803 
8804     // Now that the call site is handled, stop tracking it.
8805     FuncInfo.setCurrentCallSite(0);
8806   }
8807 
8808   return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
8809 }
8810 
8811 SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
8812                                         const BasicBlock *EHPadBB,
8813                                         MCSymbol *BeginLabel) {
8814   assert(BeginLabel && "BeginLabel should've been set");
8815 
8816   MachineFunction &MF = DAG.getMachineFunction();
8817 
8818   // Insert a label at the end of the invoke call to mark the try range.  This
8819   // can be used to detect deletion of the invoke via the MachineModuleInfo.
8820   MCSymbol *EndLabel = MF.getContext().createTempSymbol();
8821   Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
8822 
8823   // Inform MachineModuleInfo of range.
8824   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
8825   // Some platforms (e.g. wasm) use funclet-style IR but do not actually use
8826   // outlined funclets or their style of LSDA info.
8827   if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
8828     assert(II && "II should've been set");
8829     WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
8830     EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
8831   } else if (!isScopedEHPersonality(Pers)) {
8832     assert(EHPadBB);
8833     MF.addInvoke(FuncInfo.getMBB(EHPadBB), BeginLabel, EndLabel);
8834   }
8835 
8836   return Chain;
8837 }
8838 
8839 std::pair<SDValue, SDValue>
8840 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
8841                                     const BasicBlock *EHPadBB) {
8842   MCSymbol *BeginLabel = nullptr;
8843 
8844   if (EHPadBB) {
8845     // Both PendingLoads and PendingExports must be flushed here;
8846     // this call might not return.
8847     (void)getRoot();
8848     DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
8849     CLI.setChain(getRoot());
8850   }
8851 
8852   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8853   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
8854 
8855   assert((CLI.IsTailCall || Result.second.getNode()) &&
8856          "Non-null chain expected with non-tail call!");
8857   assert((Result.second.getNode() || !Result.first.getNode()) &&
8858          "Null value expected with tail call!");
8859 
8860   if (!Result.second.getNode()) {
8861     // As a special case, a null chain means that a tail call has been emitted
8862     // and the DAG root is already updated.
8863     HasTailCall = true;
8864 
8865     // Since there's no actual continuation from this block, nothing can be
8866     // relying on us setting vregs for them.
8867     PendingExports.clear();
8868   } else {
8869     DAG.setRoot(Result.second);
8870   }
8871 
8872   if (EHPadBB) {
8873     DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
8874                            BeginLabel));
8875     Result.second = getRoot();
8876   }
8877 
8878   return Result;
8879 }
8880 
8881 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
8882                                       bool isTailCall, bool isMustTailCall,
8883                                       const BasicBlock *EHPadBB,
8884                                       const TargetLowering::PtrAuthInfo *PAI) {
8885   auto &DL = DAG.getDataLayout();
8886   FunctionType *FTy = CB.getFunctionType();
8887   Type *RetTy = CB.getType();
8888 
8889   TargetLowering::ArgListTy Args;
8890   Args.reserve(CB.arg_size());
8891 
8892   const Value *SwiftErrorVal = nullptr;
8893   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8894 
8895   if (isTailCall) {
8896     // Avoid emitting tail calls in functions with the disable-tail-calls
8897     // attribute.
8898     auto *Caller = CB.getParent()->getParent();
8899     if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
8900         "true" && !isMustTailCall)
8901       isTailCall = false;
8902 
    // We can't tail call inside a function with a swifterror argument. Lowering
    // does not support this yet; the value would have to be moved into the
    // swifterror register before the call.
8906     if (TLI.supportSwiftError() &&
8907         Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8908       isTailCall = false;
8909   }
8910 
8911   for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
8912     TargetLowering::ArgListEntry Entry;
8913     const Value *V = *I;
8914 
8915     // Skip empty types
8916     if (V->getType()->isEmptyTy())
8917       continue;
8918 
8919     SDValue ArgNode = getValue(V);
8920     Entry.Node = ArgNode; Entry.Ty = V->getType();
8921 
8922     Entry.setAttributes(&CB, I - CB.arg_begin());
8923 
8924     // Use swifterror virtual register as input to the call.
8925     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
8926       SwiftErrorVal = V;
      // Find the virtual register for the actual swifterror argument and use
      // that register instead of the Value.
8929       Entry.Node =
8930           DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
8931                           EVT(TLI.getPointerTy(DL)));
8932     }
8933 
8934     Args.push_back(Entry);
8935 
    // If we have an explicit sret argument that is an Instruction (i.e., it
8937     // might point to function-local memory), we can't meaningfully tail-call.
8938     if (Entry.IsSRet && isa<Instruction>(V))
8939       isTailCall = false;
8940   }
8941 
8942   // If call site has a cfguardtarget operand bundle, create and add an
8943   // additional ArgListEntry.
8944   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
8945     TargetLowering::ArgListEntry Entry;
8946     Value *V = Bundle->Inputs[0];
8947     SDValue ArgNode = getValue(V);
8948     Entry.Node = ArgNode;
8949     Entry.Ty = V->getType();
8950     Entry.IsCFGuardTarget = true;
8951     Args.push_back(Entry);
8952   }
8953 
8954   // Check if target-independent constraints permit a tail call here.
8955   // Target-dependent constraints are checked within TLI->LowerCallTo.
8956   if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
8957     isTailCall = false;
8958 
  // Disable tail calls if there is a swifterror argument. Targets have not
  // been updated to support tail calls with swifterror yet.
8961   if (TLI.supportSwiftError() && SwiftErrorVal)
8962     isTailCall = false;
8963 
8964   ConstantInt *CFIType = nullptr;
8965   if (CB.isIndirectCall()) {
8966     if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) {
8967       if (!TLI.supportKCFIBundles())
8968         report_fatal_error(
8969             "Target doesn't support calls with kcfi operand bundles.");
8970       CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8971       assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
8972     }
8973   }
8974 
8975   SDValue ConvControlToken;
8976   if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
8977     auto *Token = Bundle->Inputs[0].get();
8978     ConvControlToken = getValue(Token);
8979   }
8980 
8981   TargetLowering::CallLoweringInfo CLI(DAG);
8982   CLI.setDebugLoc(getCurSDLoc())
8983       .setChain(getRoot())
8984       .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
8985       .setTailCall(isTailCall)
8986       .setConvergent(CB.isConvergent())
8987       .setIsPreallocated(
8988           CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
8989       .setCFIType(CFIType)
8990       .setConvergenceControlToken(ConvControlToken);
8991 
8992   // Set the pointer authentication info if we have it.
8993   if (PAI) {
8994     if (!TLI.supportPtrAuthBundles())
8995       report_fatal_error(
8996           "This target doesn't support calls with ptrauth operand bundles.");
8997     CLI.setPtrAuth(*PAI);
8998   }
8999 
9000   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
9001 
9002   if (Result.first.getNode()) {
9003     Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
9004     setValue(&CB, Result.first);
9005   }
9006 
9007   // The last element of CLI.InVals has the SDValue for swifterror return.
9008   // Here we copy it to a virtual register and update SwiftErrorMap for
9009   // book-keeping.
9010   if (SwiftErrorVal && TLI.supportSwiftError()) {
9011     // Get the last element of InVals.
9012     SDValue Src = CLI.InVals.back();
9013     Register VReg =
9014         SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
9015     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
9016     DAG.setRoot(CopyNode);
9017   }
9018 }
9019 
9020 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
9021                              SelectionDAGBuilder &Builder) {
9022   // Check to see if this load can be trivially constant folded, e.g. if the
9023   // input is from a string literal.
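  // For example, in a call like memcmp(p, "ab", 2) the second pointer is a
  // constant global, so the 2-byte load from it can typically be folded to a
  // constant here and no runtime load is emitted for that operand.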
9024   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
9025     // Cast pointer to the type we really want to load.
9026     Type *LoadTy =
9027         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
9028     if (LoadVT.isVector())
9029       LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
9030 
9031     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
9032                                          PointerType::getUnqual(LoadTy));
9033 
9034     if (const Constant *LoadCst =
9035             ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
9036                                          LoadTy, Builder.DAG.getDataLayout()))
9037       return Builder.getValue(LoadCst);
9038   }
9039 
9040   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
9041   // still constant memory, the input chain can be the entry node.
9042   SDValue Root;
9043   bool ConstantMemory = false;
9044 
9045   // Do not serialize (non-volatile) loads of constant memory with anything.
9046   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
9047     Root = Builder.DAG.getEntryNode();
9048     ConstantMemory = true;
9049   } else {
9050     // Do not serialize non-volatile loads against each other.
9051     Root = Builder.DAG.getRoot();
9052   }
9053 
9054   SDValue Ptr = Builder.getValue(PtrVal);
9055   SDValue LoadVal =
9056       Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
9057                           MachinePointerInfo(PtrVal), Align(1));
9058 
9059   if (!ConstantMemory)
9060     Builder.PendingLoads.push_back(LoadVal.getValue(1));
9061   return LoadVal;
9062 }
9063 
9064 /// Record the value for an instruction that produces an integer result,
9065 /// converting the type where necessary.
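/// For example, the i1 setcc produced by the memcmp equality fast path below
/// is zero-extended here to the call's i32 result type.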
9066 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
9067                                                   SDValue Value,
9068                                                   bool IsSigned) {
9069   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
9070                                                     I.getType(), true);
9071   Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT);
9072   setValue(&I, Value);
9073 }
9074 
9075 /// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
9076 /// true and lower it. Otherwise return false, and it will be lowered like a
9077 /// normal call.
9078 /// The caller already checked that \p I calls the appropriate LibFunc with a
9079 /// correct prototype.
9080 bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
9081   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
9082   const Value *Size = I.getArgOperand(2);
9083   const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size));
9084   if (CSize && CSize->getZExtValue() == 0) {
9085     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
9086                                                           I.getType(), true);
9087     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
9088     return true;
9089   }
9090 
9091   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9092   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
9093       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
9094       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
9095   if (Res.first.getNode()) {
9096     processIntegerCallValue(I, Res.first, true);
9097     PendingLoads.push_back(Res.second);
9098     return true;
9099   }
9100 
9101   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
9102   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
9103   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
9104     return false;
9105 
9106   // If the target has a fast compare for the given size, it will return a
9107   // preferred load type for that size. Require that the load VT is legal and
9108   // that the target supports unaligned loads of that type. Otherwise, return
9109   // INVALID.
9110   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
9111     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9112     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
9113     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
9114       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
9115       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
9116       // TODO: Check alignment of src and dest ptrs.
9117       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
9118       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
9119       if (!TLI.isTypeLegal(LVT) ||
9120           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
9121           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
9122         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
9123     }
9124 
9125     return LVT;
9126   };
9127 
9128   // This turns into unaligned loads. We only do this if the target natively
9129   // supports the MVT we'll be loading or if it is small enough (<= 4) that
9130   // we'll only produce a small number of byte loads.
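  // For example, a 16-byte memcmp whose result is only compared against zero
  // may become two wide loads and a single SETNE, provided the target reports
  // a fast 128-bit equality compare and allows misaligned loads of that type.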
9131   MVT LoadVT;
9132   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
9133   switch (NumBitsToCompare) {
9134   default:
9135     return false;
9136   case 16:
9137     LoadVT = MVT::i16;
9138     break;
9139   case 32:
9140     LoadVT = MVT::i32;
9141     break;
9142   case 64:
9143   case 128:
9144   case 256:
9145     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9146     break;
9147   }
9148 
9149   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
9150     return false;
9151 
9152   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
9153   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
9154 
9155   // Bitcast to a wide integer type if the loads are vectors.
9156   if (LoadVT.isVector()) {
9157     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
9158     LoadL = DAG.getBitcast(CmpVT, LoadL);
9159     LoadR = DAG.getBitcast(CmpVT, LoadR);
9160   }
9161 
9162   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
9163   processIntegerCallValue(I, Cmp, false);
9164   return true;
9165 }
9166 
9167 /// See if we can lower a memchr call into an optimized form. If so, return
9168 /// true and lower it. Otherwise return false, and it will be lowered like a
9169 /// normal call.
9170 /// The caller already checked that \p I calls the appropriate LibFunc with a
9171 /// correct prototype.
9172 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
9173   const Value *Src = I.getArgOperand(0);
9174   const Value *Char = I.getArgOperand(1);
9175   const Value *Length = I.getArgOperand(2);
9176 
9177   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9178   std::pair<SDValue, SDValue> Res =
9179     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
9180                                 getValue(Src), getValue(Char), getValue(Length),
9181                                 MachinePointerInfo(Src));
9182   if (Res.first.getNode()) {
9183     setValue(&I, Res.first);
9184     PendingLoads.push_back(Res.second);
9185     return true;
9186   }
9187 
9188   return false;
9189 }
9190 
9191 /// See if we can lower a mempcpy call into an optimized form. If so, return
9192 /// true and lower it. Otherwise return false, and it will be lowered like a
9193 /// normal call.
9194 /// The caller already checked that \p I calls the appropriate LibFunc with a
9195 /// correct prototype.
9196 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
9197   SDValue Dst = getValue(I.getArgOperand(0));
9198   SDValue Src = getValue(I.getArgOperand(1));
9199   SDValue Size = getValue(I.getArgOperand(2));
9200 
9201   Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
9202   Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
9203   // DAG::getMemcpy needs Alignment to be defined.
9204   Align Alignment = std::min(DstAlign, SrcAlign);
9205 
9206   SDLoc sdl = getCurSDLoc();
9207 
9208   // In the mempcpy context we need to pass in a false value for isTailCall
9209   // because the return pointer needs to be adjusted by the size of
9210   // the copied memory.
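  // mempcpy returns a pointer just past the last byte written, i.e.
  // mempcpy(dst, src, n) == (char *)memcpy(dst, src, n) + n, hence the
  // ISD::ADD of Dst and Size below.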
9211   SDValue Root = getMemoryRoot();
9212   SDValue MC = DAG.getMemcpy(
9213       Root, sdl, Dst, Src, Size, Alignment, false, false, /*CI=*/nullptr,
9214       std::nullopt, MachinePointerInfo(I.getArgOperand(0)),
9215       MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata());
9216   assert(MC.getNode() != nullptr &&
9217          "** memcpy should not be lowered as TailCall in mempcpy context **");
9218   DAG.setRoot(MC);
9219 
9220   // Check if Size needs to be truncated or extended.
9221   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
9222 
9223   // Adjust return pointer to point just past the last dst byte.
9224   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
9225                                     Dst, Size);
9226   setValue(&I, DstPlusSize);
9227   return true;
9228 }
9229 
9230 /// See if we can lower a strcpy call into an optimized form.  If so, return
9231 /// true and lower it, otherwise return false and it will be lowered like a
9232 /// normal call.
9233 /// The caller already checked that \p I calls the appropriate LibFunc with a
9234 /// correct prototype.
9235 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
9236   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9237 
9238   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9239   std::pair<SDValue, SDValue> Res =
9240     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
9241                                 getValue(Arg0), getValue(Arg1),
9242                                 MachinePointerInfo(Arg0),
9243                                 MachinePointerInfo(Arg1), isStpcpy);
9244   if (Res.first.getNode()) {
9245     setValue(&I, Res.first);
9246     DAG.setRoot(Res.second);
9247     return true;
9248   }
9249 
9250   return false;
9251 }
9252 
9253 /// See if we can lower a strcmp call into an optimized form.  If so, return
9254 /// true and lower it, otherwise return false and it will be lowered like a
9255 /// normal call.
9256 /// The caller already checked that \p I calls the appropriate LibFunc with a
9257 /// correct prototype.
9258 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
9259   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9260 
9261   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9262   std::pair<SDValue, SDValue> Res =
9263     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
9264                                 getValue(Arg0), getValue(Arg1),
9265                                 MachinePointerInfo(Arg0),
9266                                 MachinePointerInfo(Arg1));
9267   if (Res.first.getNode()) {
9268     processIntegerCallValue(I, Res.first, true);
9269     PendingLoads.push_back(Res.second);
9270     return true;
9271   }
9272 
9273   return false;
9274 }
9275 
9276 /// See if we can lower a strlen call into an optimized form.  If so, return
9277 /// true and lower it, otherwise return false and it will be lowered like a
9278 /// normal call.
9279 /// The caller already checked that \p I calls the appropriate LibFunc with a
9280 /// correct prototype.
9281 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
9282   const Value *Arg0 = I.getArgOperand(0);
9283 
9284   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9285   std::pair<SDValue, SDValue> Res =
9286     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
9287                                 getValue(Arg0), MachinePointerInfo(Arg0));
9288   if (Res.first.getNode()) {
9289     processIntegerCallValue(I, Res.first, false);
9290     PendingLoads.push_back(Res.second);
9291     return true;
9292   }
9293 
9294   return false;
9295 }
9296 
9297 /// See if we can lower a strnlen call into an optimized form.  If so, return
9298 /// true and lower it, otherwise return false and it will be lowered like a
9299 /// normal call.
9300 /// The caller already checked that \p I calls the appropriate LibFunc with a
9301 /// correct prototype.
9302 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
9303   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
9304 
9305   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
9306   std::pair<SDValue, SDValue> Res =
9307     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
9308                                  getValue(Arg0), getValue(Arg1),
9309                                  MachinePointerInfo(Arg0));
9310   if (Res.first.getNode()) {
9311     processIntegerCallValue(I, Res.first, false);
9312     PendingLoads.push_back(Res.second);
9313     return true;
9314   }
9315 
9316   return false;
9317 }
9318 
9319 /// See if we can lower a unary floating-point operation into an SDNode with
9320 /// the specified Opcode.  If so, return true and lower it, otherwise return
9321 /// false and it will be lowered like a normal call.
9322 /// The caller already checked that \p I calls the appropriate LibFunc with a
9323 /// correct prototype.
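/// For example, a call to sinf that only reads memory (and thus cannot set
/// errno) can be lowered directly to an ISD::FSIN node on its argument's
/// value type.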
9324 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
9325                                               unsigned Opcode) {
9326   // We already checked this call's prototype; verify it doesn't modify errno.
9327   if (!I.onlyReadsMemory())
9328     return false;
9329 
9330   SDNodeFlags Flags;
9331   Flags.copyFMF(cast<FPMathOperator>(I));
9332 
9333   SDValue Tmp = getValue(I.getArgOperand(0));
9334   setValue(&I,
9335            DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
9336   return true;
9337 }
9338 
9339 /// See if we can lower a binary floating-point operation into an SDNode with
9340 /// the specified Opcode. If so, return true and lower it. Otherwise return
9341 /// false, and it will be lowered like a normal call.
9342 /// The caller already checked that \p I calls the appropriate LibFunc with a
9343 /// correct prototype.
9344 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
9345                                                unsigned Opcode) {
9346   // We already checked this call's prototype; verify it doesn't modify errno.
9347   if (!I.onlyReadsMemory())
9348     return false;
9349 
9350   SDNodeFlags Flags;
9351   Flags.copyFMF(cast<FPMathOperator>(I));
9352 
9353   SDValue Tmp0 = getValue(I.getArgOperand(0));
9354   SDValue Tmp1 = getValue(I.getArgOperand(1));
9355   EVT VT = Tmp0.getValueType();
9356   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
9357   return true;
9358 }
9359 
9360 void SelectionDAGBuilder::visitCall(const CallInst &I) {
9361   // Handle inline assembly differently.
9362   if (I.isInlineAsm()) {
9363     visitInlineAsm(I);
9364     return;
9365   }
9366 
9367   diagnoseDontCall(I);
9368 
9369   if (Function *F = I.getCalledFunction()) {
9370     if (F->isDeclaration()) {
9371       // Is this an LLVM intrinsic or a target-specific intrinsic?
9372       unsigned IID = F->getIntrinsicID();
9373       if (!IID)
9374         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
9375           IID = II->getIntrinsicID(F);
9376 
9377       if (IID) {
9378         visitIntrinsicCall(I, IID);
9379         return;
9380       }
9381     }
9382 
9383     // Check for well-known libc/libm calls.  If the function is internal, it
9384     // can't be a library call.  Don't do the check if marked as nobuiltin for
9385     // some reason or the call site requires strict floating point semantics.
9386     LibFunc Func;
9387     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
9388         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
9389         LibInfo->hasOptimizedCodeGen(Func)) {
9390       switch (Func) {
9391       default: break;
9392       case LibFunc_bcmp:
9393         if (visitMemCmpBCmpCall(I))
9394           return;
9395         break;
9396       case LibFunc_copysign:
9397       case LibFunc_copysignf:
9398       case LibFunc_copysignl:
9399         // We already checked this call's prototype; verify it doesn't modify
9400         // errno.
9401         if (I.onlyReadsMemory()) {
9402           SDValue LHS = getValue(I.getArgOperand(0));
9403           SDValue RHS = getValue(I.getArgOperand(1));
9404           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
9405                                    LHS.getValueType(), LHS, RHS));
9406           return;
9407         }
9408         break;
9409       case LibFunc_fabs:
9410       case LibFunc_fabsf:
9411       case LibFunc_fabsl:
9412         if (visitUnaryFloatCall(I, ISD::FABS))
9413           return;
9414         break;
9415       case LibFunc_fmin:
9416       case LibFunc_fminf:
9417       case LibFunc_fminl:
9418         if (visitBinaryFloatCall(I, ISD::FMINNUM))
9419           return;
9420         break;
9421       case LibFunc_fmax:
9422       case LibFunc_fmaxf:
9423       case LibFunc_fmaxl:
9424         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
9425           return;
9426         break;
9427       case LibFunc_fminimum_num:
9428       case LibFunc_fminimum_numf:
9429       case LibFunc_fminimum_numl:
9430         if (visitBinaryFloatCall(I, ISD::FMINIMUMNUM))
9431           return;
9432         break;
9433       case LibFunc_fmaximum_num:
9434       case LibFunc_fmaximum_numf:
9435       case LibFunc_fmaximum_numl:
9436         if (visitBinaryFloatCall(I, ISD::FMAXIMUMNUM))
9437           return;
9438         break;
9439       case LibFunc_sin:
9440       case LibFunc_sinf:
9441       case LibFunc_sinl:
9442         if (visitUnaryFloatCall(I, ISD::FSIN))
9443           return;
9444         break;
9445       case LibFunc_cos:
9446       case LibFunc_cosf:
9447       case LibFunc_cosl:
9448         if (visitUnaryFloatCall(I, ISD::FCOS))
9449           return;
9450         break;
9451       case LibFunc_tan:
9452       case LibFunc_tanf:
9453       case LibFunc_tanl:
9454         if (visitUnaryFloatCall(I, ISD::FTAN))
9455           return;
9456         break;
9457       case LibFunc_asin:
9458       case LibFunc_asinf:
9459       case LibFunc_asinl:
9460         if (visitUnaryFloatCall(I, ISD::FASIN))
9461           return;
9462         break;
9463       case LibFunc_acos:
9464       case LibFunc_acosf:
9465       case LibFunc_acosl:
9466         if (visitUnaryFloatCall(I, ISD::FACOS))
9467           return;
9468         break;
9469       case LibFunc_atan:
9470       case LibFunc_atanf:
9471       case LibFunc_atanl:
9472         if (visitUnaryFloatCall(I, ISD::FATAN))
9473           return;
9474         break;
9475       case LibFunc_atan2:
9476       case LibFunc_atan2f:
9477       case LibFunc_atan2l:
9478         if (visitBinaryFloatCall(I, ISD::FATAN2))
9479           return;
9480         break;
9481       case LibFunc_sinh:
9482       case LibFunc_sinhf:
9483       case LibFunc_sinhl:
9484         if (visitUnaryFloatCall(I, ISD::FSINH))
9485           return;
9486         break;
9487       case LibFunc_cosh:
9488       case LibFunc_coshf:
9489       case LibFunc_coshl:
9490         if (visitUnaryFloatCall(I, ISD::FCOSH))
9491           return;
9492         break;
9493       case LibFunc_tanh:
9494       case LibFunc_tanhf:
9495       case LibFunc_tanhl:
9496         if (visitUnaryFloatCall(I, ISD::FTANH))
9497           return;
9498         break;
9499       case LibFunc_sqrt:
9500       case LibFunc_sqrtf:
9501       case LibFunc_sqrtl:
9502       case LibFunc_sqrt_finite:
9503       case LibFunc_sqrtf_finite:
9504       case LibFunc_sqrtl_finite:
9505         if (visitUnaryFloatCall(I, ISD::FSQRT))
9506           return;
9507         break;
9508       case LibFunc_floor:
9509       case LibFunc_floorf:
9510       case LibFunc_floorl:
9511         if (visitUnaryFloatCall(I, ISD::FFLOOR))
9512           return;
9513         break;
9514       case LibFunc_nearbyint:
9515       case LibFunc_nearbyintf:
9516       case LibFunc_nearbyintl:
9517         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
9518           return;
9519         break;
9520       case LibFunc_ceil:
9521       case LibFunc_ceilf:
9522       case LibFunc_ceill:
9523         if (visitUnaryFloatCall(I, ISD::FCEIL))
9524           return;
9525         break;
9526       case LibFunc_rint:
9527       case LibFunc_rintf:
9528       case LibFunc_rintl:
9529         if (visitUnaryFloatCall(I, ISD::FRINT))
9530           return;
9531         break;
9532       case LibFunc_round:
9533       case LibFunc_roundf:
9534       case LibFunc_roundl:
9535         if (visitUnaryFloatCall(I, ISD::FROUND))
9536           return;
9537         break;
9538       case LibFunc_trunc:
9539       case LibFunc_truncf:
9540       case LibFunc_truncl:
9541         if (visitUnaryFloatCall(I, ISD::FTRUNC))
9542           return;
9543         break;
9544       case LibFunc_log2:
9545       case LibFunc_log2f:
9546       case LibFunc_log2l:
9547         if (visitUnaryFloatCall(I, ISD::FLOG2))
9548           return;
9549         break;
9550       case LibFunc_exp2:
9551       case LibFunc_exp2f:
9552       case LibFunc_exp2l:
9553         if (visitUnaryFloatCall(I, ISD::FEXP2))
9554           return;
9555         break;
9556       case LibFunc_exp10:
9557       case LibFunc_exp10f:
9558       case LibFunc_exp10l:
9559         if (visitUnaryFloatCall(I, ISD::FEXP10))
9560           return;
9561         break;
9562       case LibFunc_ldexp:
9563       case LibFunc_ldexpf:
9564       case LibFunc_ldexpl:
9565         if (visitBinaryFloatCall(I, ISD::FLDEXP))
9566           return;
9567         break;
9568       case LibFunc_memcmp:
9569         if (visitMemCmpBCmpCall(I))
9570           return;
9571         break;
9572       case LibFunc_mempcpy:
9573         if (visitMemPCpyCall(I))
9574           return;
9575         break;
9576       case LibFunc_memchr:
9577         if (visitMemChrCall(I))
9578           return;
9579         break;
9580       case LibFunc_strcpy:
9581         if (visitStrCpyCall(I, false))
9582           return;
9583         break;
9584       case LibFunc_stpcpy:
9585         if (visitStrCpyCall(I, true))
9586           return;
9587         break;
9588       case LibFunc_strcmp:
9589         if (visitStrCmpCall(I))
9590           return;
9591         break;
9592       case LibFunc_strlen:
9593         if (visitStrLenCall(I))
9594           return;
9595         break;
9596       case LibFunc_strnlen:
9597         if (visitStrNLenCall(I))
9598           return;
9599         break;
9600       }
9601     }
9602   }
9603 
9604   if (I.countOperandBundlesOfType(LLVMContext::OB_ptrauth)) {
9605     LowerCallSiteWithPtrAuthBundle(cast<CallBase>(I), /*EHPadBB=*/nullptr);
9606     return;
9607   }
9608 
9609   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
9610   // have to do anything here to lower funclet bundles.
9611   // CFGuardTarget bundles are lowered in LowerCallTo.
9612   assert(!I.hasOperandBundlesOtherThan(
9613              {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
9614               LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
9615               LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi,
9616               LLVMContext::OB_convergencectrl}) &&
9617          "Cannot lower calls with arbitrary operand bundles!");
9618 
9619   SDValue Callee = getValue(I.getCalledOperand());
9620 
9621   if (I.hasDeoptState())
9622     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
9623   else
    // Check if we can potentially perform a tail call. More detailed checking
    // is done within LowerCallTo, after more information about the call is
    // known.
9627     LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
9628 }
9629 
9630 void SelectionDAGBuilder::LowerCallSiteWithPtrAuthBundle(
9631     const CallBase &CB, const BasicBlock *EHPadBB) {
9632   auto PAB = CB.getOperandBundle("ptrauth");
9633   const Value *CalleeV = CB.getCalledOperand();
9634 
9635   // Gather the call ptrauth data from the operand bundle:
9636   //   [ i32 <key>, i64 <discriminator> ]
9637   const auto *Key = cast<ConstantInt>(PAB->Inputs[0]);
9638   const Value *Discriminator = PAB->Inputs[1];
9639 
9640   assert(Key->getType()->isIntegerTy(32) && "Invalid ptrauth key");
9641   assert(Discriminator->getType()->isIntegerTy(64) &&
9642          "Invalid ptrauth discriminator");
9643 
9644   // Look through ptrauth constants to find the raw callee.
9645   // Do a direct unauthenticated call if we found it and everything matches.
9646   if (const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CalleeV))
9647     if (CalleeCPA->isKnownCompatibleWith(Key, Discriminator,
9648                                          DAG.getDataLayout()))
9649       return LowerCallTo(CB, getValue(CalleeCPA->getPointer()), CB.isTailCall(),
9650                          CB.isMustTailCall(), EHPadBB);
9651 
9652   // Functions should never be ptrauth-called directly.
9653   assert(!isa<Function>(CalleeV) && "invalid direct ptrauth call");
9654 
9655   // Otherwise, do an authenticated indirect call.
9656   TargetLowering::PtrAuthInfo PAI = {Key->getZExtValue(),
9657                                      getValue(Discriminator)};
9658 
9659   LowerCallTo(CB, getValue(CalleeV), CB.isTailCall(), CB.isMustTailCall(),
9660               EHPadBB, &PAI);
9661 }
9662 
9663 namespace {
9664 
9665 /// AsmOperandInfo - This contains information for each constraint that we are
9666 /// lowering.
9667 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
9668 public:
9669   /// CallOperand - If this is the result output operand or a clobber
9670   /// this is null, otherwise it is the incoming operand to the CallInst.
9671   /// This gets modified as the asm is processed.
9672   SDValue CallOperand;
9673 
9674   /// AssignedRegs - If this is a register or register class operand, this
  /// contains the set of registers corresponding to the operand.
9676   RegsForValue AssignedRegs;
9677 
9678   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
9679     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
9680   }
9681 
9682   /// Whether or not this operand accesses memory
9683   bool hasMemory(const TargetLowering &TLI) const {
    // An indirect operand accesses memory.
9685     if (isIndirect)
9686       return true;
9687 
9688     for (const auto &Code : Codes)
9689       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
9690         return true;
9691 
9692     return false;
9693   }
9694 };
9695 
9696 
9697 } // end anonymous namespace
9698 
9699 /// Make sure that the output operand \p OpInfo and its corresponding input
9700 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
9701 /// out).
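/// For example, if a tied output and input resolve to different register
/// classes (or one is integer-typed and the other floating-point), this
/// reports a fatal error; otherwise the input simply adopts the output's
/// constraint VT.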
9702 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
9703                                SDISelAsmOperandInfo &MatchingOpInfo,
9704                                SelectionDAG &DAG) {
9705   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9706     return;
9707 
9708   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
9709   const auto &TLI = DAG.getTargetLoweringInfo();
9710 
9711   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9712       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
9713                                        OpInfo.ConstraintVT);
9714   std::pair<unsigned, const TargetRegisterClass *> InputRC =
9715       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
9716                                        MatchingOpInfo.ConstraintVT);
9717   const bool OutOpIsIntOrFP =
9718       OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9719   const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9720                              MatchingOpInfo.ConstraintVT.isFloatingPoint();
9721   if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9722     // FIXME: error out in a more elegant fashion
9723     report_fatal_error("Unsupported asm: input constraint"
9724                        " with a matching output constraint of"
9725                        " incompatible type!");
9726   }
9727   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9728 }
9729 
9730 /// Get a direct memory input to behave well as an indirect operand.
9731 /// This may introduce stores, hence the need for a \p Chain.
9732 /// \return The (possibly updated) chain.
9733 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
9734                                         SDISelAsmOperandInfo &OpInfo,
9735                                         SelectionDAG &DAG) {
9736   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9737 
  // If we don't have an indirect input, put it in the constant pool if we can,
9739   // otherwise spill it to a stack slot.
9740   // TODO: This isn't quite right. We need to handle these according to
9741   // the addressing mode that the constraint wants. Also, this may take
9742   // an additional register for the computation and we don't want that
9743   // either.
9744 
9745   // If the operand is a float, integer, or vector constant, spill to a
9746   // constant pool entry to get its address.
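  // For example, a direct memory input whose value has been folded to a
  // ConstantFP is materialized in the constant pool, and the asm is handed the
  // address of that pool entry instead of a fresh stack slot.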
9747   const Value *OpVal = OpInfo.CallOperandVal;
9748   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9749       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9750     OpInfo.CallOperand = DAG.getConstantPool(
9751         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
9752     return Chain;
9753   }
9754 
9755   // Otherwise, create a stack slot and emit a store to it before the asm.
9756   Type *Ty = OpVal->getType();
9757   auto &DL = DAG.getDataLayout();
9758   TypeSize TySize = DL.getTypeAllocSize(Ty);
9759   MachineFunction &MF = DAG.getMachineFunction();
9760   const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
9761   int StackID = 0;
9762   if (TySize.isScalable())
9763     StackID = TFI->getStackIDForScalableVectors();
9764   int SSFI = MF.getFrameInfo().CreateStackObject(TySize.getKnownMinValue(),
9765                                                  DL.getPrefTypeAlign(Ty), false,
9766                                                  nullptr, StackID);
9767   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
9768   Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9769                             MachinePointerInfo::getFixedStack(MF, SSFI),
9770                             TLI.getMemValueType(DL, Ty));
9771   OpInfo.CallOperand = StackSlot;
9772 
9773   return Chain;
9774 }
9775 
9776 /// GetRegistersForValue - Assign registers (virtual or physical) for the
9777 /// specified operand.  We prefer to assign virtual registers, to allow the
9778 /// register allocator to handle the assignment process.  However, if the asm
9779 /// uses features that we can't model on machineinstrs, we have SDISel do the
9780 /// allocation.  This produces generally horrible, but correct, code.
9781 ///
9782 ///   OpInfo describes the operand
9783 ///   RefOpInfo describes the matching operand if any, the operand otherwise
9784 static std::optional<unsigned>
9785 getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
9786                      SDISelAsmOperandInfo &OpInfo,
9787                      SDISelAsmOperandInfo &RefOpInfo) {
9788   LLVMContext &Context = *DAG.getContext();
9789   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9790 
9791   MachineFunction &MF = DAG.getMachineFunction();
9792   SmallVector<Register, 4> Regs;
9793   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9794 
9795   // No work to do for memory/address operands.
9796   if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9797       OpInfo.ConstraintType == TargetLowering::C_Address)
9798     return std::nullopt;
9799 
9800   // If this is a constraint for a single physreg, or a constraint for a
9801   // register class, find it.
9802   unsigned AssignedReg;
9803   const TargetRegisterClass *RC;
9804   std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
9805       &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9806   // RC is unset only on failure. Return immediately.
9807   if (!RC)
9808     return std::nullopt;
9809 
9810   // Get the actual register value type.  This is important, because the user
9811   // may have asked for (e.g.) the AX register in i32 type.  We need to
9812   // remember that AX is actually i16 to get the right extension.
9813   const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
9814 
9815   if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
    // If this is an FP operand in an integer register (or vice versa), or more
9817     // generally if the operand value disagrees with the register class we plan
9818     // to stick it in, fix the operand type.
9819     //
9820     // If this is an input value, the bitcast to the new type is done now.
9821     // Bitcast for output value is done at the end of visitInlineAsm().
9822     if ((OpInfo.Type == InlineAsm::isOutput ||
9823          OpInfo.Type == InlineAsm::isInput) &&
9824         !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9825       // Try to convert to the first EVT that the reg class contains.  If the
9826       // types are identical size, use a bitcast to convert (e.g. two differing
9827       // vector types).  Note: output bitcast is done at the end of
9828       // visitInlineAsm().
9829       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9830         // Exclude indirect inputs while they are unsupported because the code
9831         // to perform the load is missing and thus OpInfo.CallOperand still
9832         // refers to the input address rather than the pointed-to value.
9833         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
9834           OpInfo.CallOperand =
9835               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
9836         OpInfo.ConstraintVT = RegVT;
9837         // If the operand is an FP value and we want it in integer registers,
9838         // use the corresponding integer type. This turns an f64 value into
9839         // i64, which can be passed with two i32 values on a 32-bit machine.
9840       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9841         MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
9842         if (OpInfo.Type == InlineAsm::isInput)
9843           OpInfo.CallOperand =
9844               DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
9845         OpInfo.ConstraintVT = VT;
9846       }
9847     }
9848   }
9849 
9850   // No need to allocate a matching input constraint since the constraint it's
9851   // matching to has already been allocated.
9852   if (OpInfo.isMatchingInputConstraint())
9853     return std::nullopt;
9854 
9855   EVT ValueVT = OpInfo.ConstraintVT;
9856   if (OpInfo.ConstraintVT == MVT::Other)
9857     ValueVT = RegVT;
9858 
9859   // Initialize NumRegs.
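  // For example, an i64 operand constrained to a 32-bit register class
  // typically needs two registers here.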
9860   unsigned NumRegs = 1;
9861   if (OpInfo.ConstraintVT != MVT::Other)
9862     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
9863 
9864   // If this is a constraint for a specific physical register, like {r17},
9865   // assign it now.
9866 
  // If this is associated with a specific register, initialize the iterator to
  // the correct place. If virtual, make sure we have enough registers.
9869 
9870   // Initialize iterator if necessary
9871   TargetRegisterClass::iterator I = RC->begin();
9872   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9873 
9874   // Do not check for single registers.
9875   if (AssignedReg) {
9876     I = std::find(I, RC->end(), AssignedReg);
9877     if (I == RC->end()) {
9878       // RC does not contain the selected register, which indicates a
9879       // mismatch between the register and the required type/bitwidth.
9880       return {AssignedReg};
9881     }
9882   }
9883 
9884   for (; NumRegs; --NumRegs, ++I) {
9885     assert(I != RC->end() && "Ran out of registers to allocate!");
9886     Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
9887     Regs.push_back(R);
9888   }
9889 
9890   OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
9891   return std::nullopt;
9892 }
9893 
9894 static unsigned
9895 findMatchingInlineAsmOperand(unsigned OperandNo,
9896                              const std::vector<SDValue> &AsmNodeOperands) {
9897   // Scan until we find the definition we already emitted of this operand.
9898   unsigned CurOp = InlineAsm::Op_FirstOperand;
9899   for (; OperandNo; --OperandNo) {
9900     // Advance to the next operand.
9901     unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9902     const InlineAsm::Flag F(OpFlag);
9903     assert(
9904         (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
9905         "Skipped past definitions?");
9906     CurOp += F.getNumOperandRegisters() + 1;
9907   }
9908   return CurOp;
9909 }
9910 
9911 namespace {
9912 
9913 class ExtraFlags {
9914   unsigned Flags = 0;
9915 
9916 public:
9917   explicit ExtraFlags(const CallBase &Call) {
9918     const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9919     if (IA->hasSideEffects())
9920       Flags |= InlineAsm::Extra_HasSideEffects;
9921     if (IA->isAlignStack())
9922       Flags |= InlineAsm::Extra_IsAlignStack;
9923     if (Call.isConvergent())
9924       Flags |= InlineAsm::Extra_IsConvergent;
9925     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
9926   }
9927 
9928   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
9929     // Ideally, we would only check against memory constraints.  However, the
9930     // meaning of an Other constraint can be target-specific and we can't easily
9931     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
9932     // for Other constraints as well.
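    // For example, an "=m" output marks the asm as MayStore, an "m" input
    // marks it as MayLoad, and a memory clobber conservatively sets both.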
9933     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9934         OpInfo.ConstraintType == TargetLowering::C_Other) {
9935       if (OpInfo.Type == InlineAsm::isInput)
9936         Flags |= InlineAsm::Extra_MayLoad;
9937       else if (OpInfo.Type == InlineAsm::isOutput)
9938         Flags |= InlineAsm::Extra_MayStore;
9939       else if (OpInfo.Type == InlineAsm::isClobber)
9940         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
9941     }
9942   }
9943 
9944   unsigned get() const { return Flags; }
9945 };
9946 
9947 } // end anonymous namespace
9948 
9949 static bool isFunction(SDValue Op) {
9950   if (Op && Op.getOpcode() == ISD::GlobalAddress) {
9951     if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9952       auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9953 
      // A normal "call dllimport func" instruction (non-inline-asm) forces
      // indirect access through its call opcode, and the asm printer usually
      // prints an indirect symbol (e.g. "*") based on that opcode. Inline asm
      // cannot do that yet (in fact, this is similar to the "Data Access"
      // case), so we ignore dllimport functions here.
9959       if (Fn && !Fn->hasDLLImportStorageClass())
9960         return true;
9961     }
9962   }
9963   return false;
9964 }
9965 
9966 /// visitInlineAsm - Handle a call to an InlineAsm object.
9967 void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
9968                                          const BasicBlock *EHPadBB) {
9969   const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9970 
9971   /// ConstraintOperands - Information about all of the constraints.
9972   SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
9973 
9974   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9975   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
9976       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
9977 
9978   // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
9979   // AsmDialect, MayLoad, MayStore).
9980   bool HasSideEffect = IA->hasSideEffects();
9981   ExtraFlags ExtraInfo(Call);
9982 
9983   for (auto &T : TargetConstraints) {
9984     ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
9985     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
9986 
9987     if (OpInfo.CallOperandVal)
9988       OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
9989 
9990     if (!HasSideEffect)
9991       HasSideEffect = OpInfo.hasMemory(TLI);
9992 
9993     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
9994     // FIXME: Could we compute this on OpInfo rather than T?
9995 
9996     // Compute the constraint code and ConstraintType to use.
9997     TLI.ComputeConstraintToUse(T, SDValue());
9998 
9999     if (T.ConstraintType == TargetLowering::C_Immediate &&
10000         OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
      // We've delayed emitting a diagnostic for constraints like "n" because
      // inlining could cause an integer constant to show up.
10003       return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
10004                                           "' expects an integer constant "
10005                                           "expression");
10006 
10007     ExtraInfo.update(T);
10008   }
10009 
10010   // We won't need to flush pending loads if this asm doesn't touch
10011   // memory and is nonvolatile.
10012   SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
10013 
10014   bool EmitEHLabels = isa<InvokeInst>(Call);
10015   if (EmitEHLabels) {
10016     assert(EHPadBB && "InvokeInst must have an EHPadBB");
10017   }
10018   bool IsCallBr = isa<CallBrInst>(Call);
10019 
10020   if (IsCallBr || EmitEHLabels) {
10021     // If this is a callbr or invoke we need to flush pending exports since
10022     // inlineasm_br and invoke are terminators.
10023     // We need to do this before nodes are glued to the inlineasm_br node.
10024     Chain = getControlRoot();
10025   }
10026 
10027   MCSymbol *BeginLabel = nullptr;
10028   if (EmitEHLabels) {
10029     Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10030   }
10031 
10032   int OpNo = -1;
10033   SmallVector<StringRef> AsmStrs;
10034   IA->collectAsmStrs(AsmStrs);
10035 
10036   // Second pass over the constraints: compute which constraint option to use.
10037   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10038     if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
10039       OpNo++;
10040 
10041     // If this is an output operand with a matching input operand, look up the
10042     // matching input. If their types mismatch, e.g. one is an integer, the
10043     // other is floating point, or their sizes are different, flag it as an
10044     // error.
10045     if (OpInfo.hasMatchingInput()) {
10046       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10047       patchMatchingInput(OpInfo, Input, DAG);
10048     }
10049 
10050     // Compute the constraint code and ConstraintType to use.
10051     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
10052 
10053     if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
10054          OpInfo.Type == InlineAsm::isClobber) ||
10055         OpInfo.ConstraintType == TargetLowering::C_Address)
10056       continue;
10057 
    // In the Linux PIC model, there are four cases of value/label addressing:
10059     //
10060     // 1: Function call or Label jmp inside the module.
10061     // 2: Data access (such as global variable, static variable) inside module.
10062     // 3: Function call or Label jmp outside the module.
10063     // 4: Data access (such as global variable) outside the module.
10064     //
    // Because the current LLVM inline asm architecture is designed not to
    // "recognize" the asm code, it is hard for us to treat memory addressing
    // differently for the same value/address used in different instructions.
    // For example, in the PIC model, a function call may go through the PLT or
    // be directly PC-relative, while a lea/mov of a function address may use
    // the GOT.
    //
    // Here we try to "recognize" a function call for cases 1 and 3 in inline
    // asm, and adjust the constraint for them.
    //
    // TODO: The current inline asm support does not encourage jumping to a
    // label outside the module, so we don't handle jumps to function labels
    // yet; we should enhance this (especially in the PIC model) if meaningful
    // requirements arise.
10077     if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
10078         TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
10079         TM.getCodeModel() != CodeModel::Large) {
10080       OpInfo.isIndirect = false;
10081       OpInfo.ConstraintType = TargetLowering::C_Address;
10082     }
10083 
10084     // If this is a memory input, and if the operand is not indirect, do what we
10085     // need to provide an address for the memory input.
10086     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
10087         !OpInfo.isIndirect) {
10088       assert((OpInfo.isMultipleAlternative ||
10089               (OpInfo.Type == InlineAsm::isInput)) &&
10090              "Can only indirectify direct input operands!");
10091 
10092       // Memory operands really want the address of the value.
10093       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
10094 
10095       // There is no longer a Value* corresponding to this operand.
10096       OpInfo.CallOperandVal = nullptr;
10097 
10098       // It is now an indirect operand.
10099       OpInfo.isIndirect = true;
10100     }
10101 
10102   }
10103 
10104   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
10105   std::vector<SDValue> AsmNodeOperands;
10106   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
10107   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
10108       IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
10109 
10110   // If we have a !srcloc metadata node associated with it, we want to attach
10111   // this to the ultimately generated inline asm machineinstr.  To do this, we
10112   // pass in the third operand as this (potentially null) inline asm MDNode.
10113   const MDNode *SrcLoc = Call.getMetadata("srcloc");
10114   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
10115 
10116   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
10117   // bits as operand 3.
10118   AsmNodeOperands.push_back(DAG.getTargetConstant(
10119       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
10120 
  // Third pass: Loop over operands to prepare DAG-level operands. As part of
  // this, assign virtual and physical registers for inputs and outputs.
10123   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10124     // Assign Registers.
10125     SDISelAsmOperandInfo &RefOpInfo =
10126         OpInfo.isMatchingInputConstraint()
10127             ? ConstraintOperands[OpInfo.getMatchedOperand()]
10128             : OpInfo;
10129     const auto RegError =
10130         getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
10131     if (RegError) {
10132       const MachineFunction &MF = DAG.getMachineFunction();
10133       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
10134       const char *RegName = TRI.getName(*RegError);
10135       emitInlineAsmError(Call, "register '" + Twine(RegName) +
10136                                    "' allocated for constraint '" +
10137                                    Twine(OpInfo.ConstraintCode) +
10138                                    "' does not match required type");
10139       return;
10140     }
10141 
10142     auto DetectWriteToReservedRegister = [&]() {
10143       const MachineFunction &MF = DAG.getMachineFunction();
10144       const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
10145       for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
10146         if (Register::isPhysicalRegister(Reg) &&
10147             TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
10148           const char *RegName = TRI.getName(Reg);
10149           emitInlineAsmError(Call, "write to reserved register '" +
10150                                        Twine(RegName) + "'");
10151           return true;
10152         }
10153       }
10154       return false;
10155     };
10156     assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
10157             (OpInfo.Type == InlineAsm::isInput &&
10158              !OpInfo.isMatchingInputConstraint())) &&
10159            "Only address as input operand is allowed.");
10160 
10161     switch (OpInfo.Type) {
10162     case InlineAsm::isOutput:
10163       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
10164         const InlineAsm::ConstraintCode ConstraintID =
10165             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10166         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
10167                "Failed to convert memory constraint code to constraint id.");
10168 
10169         // Add information to the INLINEASM node to know about this output.
10170         InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
10171         OpFlags.setMemConstraint(ConstraintID);
10172         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
10173                                                         MVT::i32));
10174         AsmNodeOperands.push_back(OpInfo.CallOperand);
10175       } else {
10176         // Otherwise, this outputs to a register (directly for C_Register /
10177         // C_RegisterClass, and a target-defined fashion for
10178         // C_Immediate/C_Other). Find a register that we can use.
10179         if (OpInfo.AssignedRegs.Regs.empty()) {
10180           emitInlineAsmError(
10181               Call, "couldn't allocate output register for constraint '" +
10182                         Twine(OpInfo.ConstraintCode) + "'");
10183           return;
10184         }
10185 
10186         if (DetectWriteToReservedRegister())
10187           return;
10188 
10189         // Add information to the INLINEASM node to know that this register is
10190         // set.
10191         OpInfo.AssignedRegs.AddInlineAsmOperands(
10192             OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
10193                                   : InlineAsm::Kind::RegDef,
10194             false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
10195       }
10196       break;
10197 
10198     case InlineAsm::isInput:
10199     case InlineAsm::isLabel: {
10200       SDValue InOperandVal = OpInfo.CallOperand;
10201 
10202       if (OpInfo.isMatchingInputConstraint()) {
10203         // If this is required to match an output register we have already set,
10204         // just use its register.
10205         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
10206                                                   AsmNodeOperands);
10207         InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10208         if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
10209           if (OpInfo.isIndirect) {
10210             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
10211             emitInlineAsmError(Call, "inline asm not supported yet: "
10212                                      "don't know how to handle tied "
10213                                      "indirect register inputs");
10214             return;
10215           }
10216 
10217           SmallVector<Register, 4> Regs;
10218           MachineFunction &MF = DAG.getMachineFunction();
10219           MachineRegisterInfo &MRI = MF.getRegInfo();
10220           const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
10221           auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
10222           Register TiedReg = R->getReg();
10223           MVT RegVT = R->getSimpleValueType(0);
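          // Choose a register class for the new virtual registers: reuse the
          // class of a virtual tied register, otherwise derive one from the
          // register's value type, falling back to the minimal class containing
          // the physical register.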
10224           const TargetRegisterClass *RC =
10225               TiedReg.isVirtual()     ? MRI.getRegClass(TiedReg)
10226               : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
10227                                       : TRI.getMinimalPhysRegClass(TiedReg);
10228           for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i)
10229             Regs.push_back(MRI.createVirtualRegister(RC));
10230 
10231           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
10232 
10233           SDLoc dl = getCurSDLoc();
10234           // Use the produced MatchedRegs object to copy the input value into the new virtual registers.
10235           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, &Call);
10236           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, true,
10237                                            OpInfo.getMatchedOperand(), dl, DAG,
10238                                            AsmNodeOperands);
10239           break;
10240         }
10241 
10242         assert(Flag.isMemKind() && "Unknown matching constraint!");
10243         assert(Flag.getNumOperandRegisters() == 1 &&
10244                "Unexpected number of operands");
10245         // Add information to the INLINEASM node to know about this input.
10246         // See InlineAsm.h isUseOperandTiedToDef.
10247         Flag.clearMemConstraint();
10248         Flag.setMatchingOp(OpInfo.getMatchedOperand());
10249         AsmNodeOperands.push_back(DAG.getTargetConstant(
10250             Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
10251         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10252         break;
10253       }
10254 
10255       // Treat indirect 'X' constraint as memory.
10256       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
10257           OpInfo.isIndirect)
10258         OpInfo.ConstraintType = TargetLowering::C_Memory;
10259 
10260       if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
10261           OpInfo.ConstraintType == TargetLowering::C_Other) {
10262         std::vector<SDValue> Ops;
10263         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
10264                                           Ops, DAG);
10265         if (Ops.empty()) {
10266           if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
10267             if (isa<ConstantSDNode>(InOperandVal)) {
10268               emitInlineAsmError(Call, "value out of range for constraint '" +
10269                                            Twine(OpInfo.ConstraintCode) + "'");
10270               return;
10271             }
10272 
10273           emitInlineAsmError(Call,
10274                              "invalid operand for inline asm constraint '" +
10275                                  Twine(OpInfo.ConstraintCode) + "'");
10276           return;
10277         }
10278 
10279         // Add information to the INLINEASM node to know about this input.
10280         InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size());
10281         AsmNodeOperands.push_back(DAG.getTargetConstant(
10282             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
10283         llvm::append_range(AsmNodeOperands, Ops);
10284         break;
10285       }
10286 
10287       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
10288         assert((OpInfo.isIndirect ||
10289                 OpInfo.ConstraintType != TargetLowering::C_Memory) &&
10290                "Operand must be indirect to be a mem!");
10291         assert(InOperandVal.getValueType() ==
10292                    TLI.getPointerTy(DAG.getDataLayout()) &&
10293                "Memory operands expect pointer values");
10294 
10295         const InlineAsm::ConstraintCode ConstraintID =
10296             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10297         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
10298                "Failed to convert memory constraint code to constraint id.");
10299 
10300         // Add information to the INLINEASM node to know about this input.
10301         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
10302         ResOpType.setMemConstraint(ConstraintID);
10303         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
10304                                                         getCurSDLoc(),
10305                                                         MVT::i32));
10306         AsmNodeOperands.push_back(InOperandVal);
10307         break;
10308       }
10309 
10310       if (OpInfo.ConstraintType == TargetLowering::C_Address) {
10311         const InlineAsm::ConstraintCode ConstraintID =
10312             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
10313         assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
10314                "Failed to convert memory constraint code to constraint id.");
10315 
10316         InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
10317 
10318         SDValue AsmOp = InOperandVal;
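        // Direct function references are rewritten below to use the Func
        // operand kind and a target global address node.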
10319         if (isFunction(InOperandVal)) {
10320           auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
10321           ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1);
10322           AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
10323                                              InOperandVal.getValueType(),
10324                                              GA->getOffset());
10325         }
10326 
10327         // Add information to the INLINEASM node to know about this input.
10328         ResOpType.setMemConstraint(ConstraintID);
10329 
10330         AsmNodeOperands.push_back(
10331             DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32));
10332 
10333         AsmNodeOperands.push_back(AsmOp);
10334         break;
10335       }
10336 
10337       if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
10338           OpInfo.ConstraintType != TargetLowering::C_Register) {
10339         emitInlineAsmError(Call, "unknown asm constraint '" +
10340                                      Twine(OpInfo.ConstraintCode) + "'");
10341         return;
10342       }
10343 
10344       // TODO: Support this.
10345       if (OpInfo.isIndirect) {
10346         emitInlineAsmError(
10347             Call, "Don't know how to handle indirect register inputs yet "
10348                   "for constraint '" +
10349                       Twine(OpInfo.ConstraintCode) + "'");
10350         return;
10351       }
10352 
10353       // Copy the input into the appropriate registers.
10354       if (OpInfo.AssignedRegs.Regs.empty()) {
10355         emitInlineAsmError(Call,
10356                            "couldn't allocate input reg for constraint '" +
10357                                Twine(OpInfo.ConstraintCode) + "'");
10358         return;
10359       }
10360 
10361       if (DetectWriteToReservedRegister())
10362         return;
10363 
10364       SDLoc dl = getCurSDLoc();
10365 
10366       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue,
10367                                         &Call);
10368 
10369       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, false,
10370                                                0, dl, DAG, AsmNodeOperands);
10371       break;
10372     }
10373     case InlineAsm::isClobber:
10374       // Add the clobbered value to the operand list, so that the register
10375       // allocator is aware that the physreg got clobbered.
10376       if (!OpInfo.AssignedRegs.Regs.empty())
10377         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::Clobber,
10378                                                  false, 0, getCurSDLoc(), DAG,
10379                                                  AsmNodeOperands);
10380       break;
10381     }
10382   }
10383 
10384   // Finish up input operands.  Set the input chain and add the flag last.
10385   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
10386   if (Glue.getNode()) AsmNodeOperands.push_back(Glue);
10387 
10388   unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
10389   Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
10390                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10391   Glue = Chain.getValue(1);
10392 
10393   // Do additional work to generate outputs.
10394 
10395   SmallVector<EVT, 1> ResultVTs;
10396   SmallVector<SDValue, 1> ResultValues;
10397   SmallVector<SDValue, 8> OutChains;
10398 
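  // An inline asm call with multiple outputs returns a struct; collect the
  // element types so each lowered output can be matched to its IR result type.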
10399   llvm::Type *CallResultType = Call.getType();
10400   ArrayRef<Type *> ResultTypes;
10401   if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
10402     ResultTypes = StructResult->elements();
10403   else if (!CallResultType->isVoidTy())
10404     ResultTypes = ArrayRef(CallResultType);
10405 
10406   auto CurResultType = ResultTypes.begin();
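  // Append one lowered output value, coercing the register value to the
  // corresponding IR result type where the sizes allow it.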
10407   auto handleRegAssign = [&](SDValue V) {
10408     assert(CurResultType != ResultTypes.end() && "Unexpected value");
10409     assert((*CurResultType)->isSized() && "Unexpected unsized type");
10410     EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
10411     ++CurResultType;
10412     // If the type of the inline asm call site return value differs from the type
10413     // of the asm output but has the same size, bitcast it.  One example of this
10414     // is for vectors with different width / number of elements.  This can
10415     // happen for register classes that can contain multiple different value
10416     // types.  The preg or vreg allocated may not have the same VT as was
10417     // expected.
10418     //
10419     // This can also happen for a return value that disagrees with the register
10420     // class it is put in, e.g. a double in a general-purpose register on a
10421     // 32-bit machine.
10422     if (ResultVT != V.getValueType() &&
10423         ResultVT.getSizeInBits() == V.getValueSizeInBits())
10424       V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
10425     else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
10426              V.getValueType().isInteger()) {
10427       // If a result value was tied to an input value, the computed result
10428       // may have a wider width than the expected result.  Extract the
10429       // relevant portion.
10430       V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
10431     }
10432     assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
10433     ResultVTs.push_back(ResultVT);
10434     ResultValues.push_back(V);
10435   };
10436 
10437   // Deal with output operands.
10438   for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10439     if (OpInfo.Type == InlineAsm::isOutput) {
10440       SDValue Val;
10441       // Skip trivial output operands.
10442       if (OpInfo.AssignedRegs.Regs.empty())
10443         continue;
10444 
10445       switch (OpInfo.ConstraintType) {
10446       case TargetLowering::C_Register:
10447       case TargetLowering::C_RegisterClass:
10448         Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
10449                                                   Chain, &Glue, &Call);
10450         break;
10451       case TargetLowering::C_Immediate:
10452       case TargetLowering::C_Other:
10453         Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
10454                                               OpInfo, DAG);
10455         break;
10456       case TargetLowering::C_Memory:
10457         break; // Already handled.
10458       case TargetLowering::C_Address:
10459         break; // Silence warning.
10460       case TargetLowering::C_Unknown:
10461         assert(false && "Unexpected unknown constraint");
10462       }
10463 
10464       // Indirect outputs manifest as stores. Record the output chains.
10465       if (OpInfo.isIndirect) {
10466         const Value *Ptr = OpInfo.CallOperandVal;
10467         assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
10468         SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
10469                                      MachinePointerInfo(Ptr));
10470         OutChains.push_back(Store);
10471       } else {
10472         // The output value was copied out of its registers above; record it as (part of) the asm result.
10473         assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
10474         if (Val.getOpcode() == ISD::MERGE_VALUES) {
10475           for (const SDValue &V : Val->op_values())
10476             handleRegAssign(V);
10477         } else
10478           handleRegAssign(Val);
10479       }
10480     }
10481   }
10482 
10483   // Set results.
10484   if (!ResultValues.empty()) {
10485     assert(CurResultType == ResultTypes.end() &&
10486            "Mismatch in number of ResultTypes");
10487     assert(ResultValues.size() == ResultTypes.size() &&
10488            "Mismatch in number of output operands in asm result");
10489 
10490     SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
10491                             DAG.getVTList(ResultVTs), ResultValues);
10492     setValue(&Call, V);
10493   }
10494 
10495   // Collect store chains.
10496   if (!OutChains.empty())
10497     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
10498 
10499   if (EmitEHLabels) {
10500     Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
10501   }
10502 
10503   // Only update the root if the inline assembly has a memory effect.
10504   if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
10505       EmitEHLabels)
10506     DAG.setRoot(Chain);
10507 }
10508 
10509 void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
10510                                              const Twine &Message) {
10511   LLVMContext &Ctx = *DAG.getContext();
10512   Ctx.emitError(&Call, Message);
10513 
10514   // Make sure we leave the DAG in a valid state
10515   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10516   SmallVector<EVT, 1> ValueVTs;
10517   ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
10518 
10519   if (ValueVTs.empty())
10520     return;
10521 
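  // Produce undef values of the expected types so later uses of the call
  // still have something to refer to after the error.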
10522   SmallVector<SDValue, 1> Ops;
10523   for (const EVT &VT : ValueVTs)
10524     Ops.push_back(DAG.getUNDEF(VT));
10525 
10526   setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
10527 }
10528 
10529 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
10530   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
10531                           MVT::Other, getRoot(),
10532                           getValue(I.getArgOperand(0)),
10533                           DAG.getSrcValue(I.getArgOperand(0))));
10534 }
10535 
10536 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
10537   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10538   const DataLayout &DL = DAG.getDataLayout();
10539   SDValue V = DAG.getVAArg(
10540       TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
10541       getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
10542       DL.getABITypeAlign(I.getType()).value());
10543   DAG.setRoot(V.getValue(1));
10544 
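  // The in-memory type used for a pointer va_arg may differ from the pointer's
  // value type; extend or truncate the loaded value to the expected width.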
10545   if (I.getType()->isPointerTy())
10546     V = DAG.getPtrExtOrTrunc(
10547         V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
10548   setValue(&I, V);
10549 }
10550 
10551 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
10552   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
10553                           MVT::Other, getRoot(),
10554                           getValue(I.getArgOperand(0)),
10555                           DAG.getSrcValue(I.getArgOperand(0))));
10556 }
10557 
10558 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
10559   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
10560                           MVT::Other, getRoot(),
10561                           getValue(I.getArgOperand(0)),
10562                           getValue(I.getArgOperand(1)),
10563                           DAG.getSrcValue(I.getArgOperand(0)),
10564                           DAG.getSrcValue(I.getArgOperand(1))));
10565 }
10566 
10567 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
10568                                                     const Instruction &I,
10569                                                     SDValue Op) {
10570   std::optional<ConstantRange> CR = getRange(I);
10571 
10572   if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10573     return Op;
10574 
10575   APInt Lo = CR->getUnsignedMin();
10576   if (!Lo.isMinValue())
10577     return Op;
10578 
10579   APInt Hi = CR->getUnsignedMax();
10580   unsigned Bits = std::max(Hi.getActiveBits(),
10581                            static_cast<unsigned>(IntegerType::MIN_INT_BITS));
10582 
10583   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
10584 
10585   SDLoc SL = getCurSDLoc();
10586 
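  // Annotate the value with an AssertZext of the narrowest integer type that
  // still covers the known range, so later combines can drop redundant
  // zero-extensions.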
10587   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
10588                              DAG.getValueType(SmallVT));
10589   unsigned NumVals = Op.getNode()->getNumValues();
10590   if (NumVals == 1)
10591     return ZExt;
10592 
10593   SmallVector<SDValue, 4> Ops;
10594 
10595   Ops.push_back(ZExt);
10596   for (unsigned I = 1; I != NumVals; ++I)
10597     Ops.push_back(Op.getValue(I));
10598 
10599   return DAG.getMergeValues(Ops, SL);
10600 }
10601 
10602 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
10603 /// the call being lowered.
10604 ///
10605 /// This is a helper for lowering intrinsics that follow a target calling
10606 /// convention or require stack pointer adjustment. Only a subset of the
10607 /// intrinsic's operands need to participate in the calling convention.
10608 void SelectionDAGBuilder::populateCallLoweringInfo(
10609     TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
10610     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
10611     AttributeSet RetAttrs, bool IsPatchPoint) {
10612   TargetLowering::ArgListTy Args;
10613   Args.reserve(NumArgs);
10614 
10615   // Populate the argument list.
10616   // Attributes for args start at offset 1, after the return attribute.
10617   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10618        ArgI != ArgE; ++ArgI) {
10619     const Value *V = Call->getOperand(ArgI);
10620 
10621     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
10622 
10623     TargetLowering::ArgListEntry Entry;
10624     Entry.Node = getValue(V);
10625     Entry.Ty = V->getType();
10626     Entry.setAttributes(Call, ArgI);
10627     Args.push_back(Entry);
10628   }
10629 
10630   CLI.setDebugLoc(getCurSDLoc())
10631       .setChain(getRoot())
10632       .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10633                  RetAttrs)
10634       .setDiscardResult(Call->use_empty())
10635       .setIsPatchPoint(IsPatchPoint)
10636       .setIsPreallocated(
10637           Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
10638 }
10639 
10640 /// Add a stack map intrinsic call's live variable operands to a stackmap
10641 /// or patchpoint target node's operand list.
10642 ///
10643 /// Constants are converted to TargetConstants purely as an optimization to
10644 /// avoid constant materialization and register allocation.
10645 ///
10646 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
10647 /// generate address computation nodes, and so FinalizeISel can convert the
10648 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
10649 /// address materialization and register allocation, but may also be required
10650 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
10651 /// alloca in the entry block, then the runtime may assume that the alloca's
10652 /// StackMap location can be read immediately after compilation and that the
10653 /// location is valid at any point during execution (this is similar to the
10654 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
10655 /// only available in a register, then the runtime would need to trap when
10656 /// execution reaches the StackMap in order to read the alloca's location.
10657 static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
10658                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
10659                                 SelectionDAGBuilder &Builder) {
10660   SelectionDAG &DAG = Builder.DAG;
10661   for (unsigned I = StartIdx; I < Call.arg_size(); I++) {
10662     SDValue Op = Builder.getValue(Call.getArgOperand(I));
10663 
10664     // Things on the stack are pointer-typed, meaning that they are already
10665     // legal and can be emitted directly to target nodes.
10666     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
10667       Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
10668     } else {
10669       // Otherwise emit a target independent node to be legalised.
10670       Ops.push_back(Builder.getValue(Call.getArgOperand(I)));
10671     }
10672   }
10673 }
10674 
10675 /// Lower llvm.experimental.stackmap.
10676 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
10677   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
10678   //                                  [live variables...])
10679 
10680   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
10681 
10682   SDValue Chain, InGlue, Callee;
10683   SmallVector<SDValue, 32> Ops;
10684 
10685   SDLoc DL = getCurSDLoc();
10686   Callee = getValue(CI.getCalledOperand());
10687 
10688   // The stackmap intrinsic only records the live variables (the arguments
10689   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
10690   // intrinsic, this won't be lowered to a function call. This means we don't
10691   // have to worry about calling conventions and target specific lowering code.
10692   // Instead we perform the call lowering right here.
10693   //
10694   // chain, flag = CALLSEQ_START(chain, 0, 0)
10695   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
10696   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
10697   //
10698   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
10699   InGlue = Chain.getValue(1);
10700 
10701   // Add the STACKMAP operands, starting with DAG house-keeping.
10702   Ops.push_back(Chain);
10703   Ops.push_back(InGlue);
10704 
10705   // Add the <id>, <numShadowBytes> operands.
10706   //
10707   // These do not require legalisation, and can be emitted directly to target
10708   // constant nodes.
10709   SDValue ID = getValue(CI.getArgOperand(0));
10710   assert(ID.getValueType() == MVT::i64);
10711   SDValue IDConst =
10712       DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType());
10713   Ops.push_back(IDConst);
10714 
10715   SDValue Shad = getValue(CI.getArgOperand(1));
10716   assert(Shad.getValueType() == MVT::i32);
10717   SDValue ShadConst =
10718       DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType());
10719   Ops.push_back(ShadConst);
10720 
10721   // Add the live variables.
10722   addStackMapLiveVars(CI, 2, DL, Ops, *this);
10723 
10724   // Create the STACKMAP node.
10725   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10726   Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
10727   InGlue = Chain.getValue(1);
10728 
10729   Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
10730 
10731   // Stackmaps don't generate values, so nothing goes into the NodeMap.
10732 
10733   // Set the root to the target-lowered call chain.
10734   DAG.setRoot(Chain);
10735 
10736   // Inform the Frame Information that we have a stackmap in this function.
10737   FuncInfo.MF->getFrameInfo().setHasStackMap();
10738 }
10739 
10740 /// Lower llvm.experimental.patchpoint directly to its target opcode.
10741 void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
10742                                           const BasicBlock *EHPadBB) {
10743   // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
10744   //                                         i32 <numBytes>,
10745   //                                         i8* <target>,
10746   //                                         i32 <numArgs>,
10747   //                                         [Args...],
10748   //                                         [live variables...])
10749 
10750   CallingConv::ID CC = CB.getCallingConv();
10751   bool IsAnyRegCC = CC == CallingConv::AnyReg;
10752   bool HasDef = !CB.getType()->isVoidTy();
10753   SDLoc dl = getCurSDLoc();
10754   SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
10755 
10756   // Handle immediate and symbolic callees.
10757   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10758     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10759                                    /*isTarget=*/true);
10760   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10761     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10762                                          SDLoc(SymbolicCallee),
10763                                          SymbolicCallee->getValueType(0));
10764 
10765   // Get the real number of arguments participating in the call <numArgs>
10766   SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
10767   unsigned NumArgs = NArgVal->getAsZExtVal();
10768 
10769   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
10770   // Intrinsics include all meta-operands up to but not including CC.
10771   unsigned NumMetaOpers = PatchPointOpers::CCPos;
10772   assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
10773          "Not enough arguments provided to the patchpoint intrinsic");
10774 
10775   // For AnyRegCC the arguments are lowered later on manually.
10776   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10777   Type *ReturnTy =
10778       IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
10779 
10780   TargetLowering::CallLoweringInfo CLI(DAG);
10781   populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
10782                            ReturnTy, CB.getAttributes().getRetAttrs(), true);
10783   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
10784 
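  // Walk backwards from the end of the lowered call sequence, skipping an
  // optional EH label and an optional CopyFromReg for the return value, to
  // find the CALLSEQ_END node.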
10785   SDNode *CallEnd = Result.second.getNode();
10786   if (CallEnd->getOpcode() == ISD::EH_LABEL)
10787     CallEnd = CallEnd->getOperand(0).getNode();
10788   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
10789     CallEnd = CallEnd->getOperand(0).getNode();
10790 
10791   // Get a call instruction from the call sequence chain.
10792   // Tail calls are not allowed.
10793   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
10794          "Expected a callseq node.");
10795   SDNode *Call = CallEnd->getOperand(0).getNode();
10796   bool HasGlue = Call->getGluedNode();
10797 
10798   // Replace the target specific call node with the patchable intrinsic.
10799   SmallVector<SDValue, 8> Ops;
10800 
10801   // Push the chain.
10802   Ops.push_back(*(Call->op_begin()));
10803 
10804   // Optionally, push the glue (if any).
10805   if (HasGlue)
10806     Ops.push_back(*(Call->op_end() - 1));
10807 
10808   // Push the register mask info.
10809   if (HasGlue)
10810     Ops.push_back(*(Call->op_end() - 2));
10811   else
10812     Ops.push_back(*(Call->op_end() - 1));
10813 
10814   // Add the <id> and <numBytes> constants.
10815   SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
10816   Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64));
10817   SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
10818   Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32));
10819 
10820   // Add the callee.
10821   Ops.push_back(Callee);
10822 
10823   // Adjust <numArgs> to account for any arguments that have been passed on the
10824   // stack instead.
10825   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
10826   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
10827   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10828   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10829 
10830   // Add the calling convention
10831   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
10832 
10833   // Add the arguments we omitted previously. The register allocator should
10834   // place these in any free register.
10835   if (IsAnyRegCC)
10836     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
10837       Ops.push_back(getValue(CB.getArgOperand(i)));
10838 
10839   // Push the arguments from the call instruction.
10840   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
10841   Ops.append(Call->op_begin() + 2, e);
10842 
10843   // Push live variables for the stack map.
10844   addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
10845 
10846   SDVTList NodeTys;
10847   if (IsAnyRegCC && HasDef) {
10848     // Create the return types based on the intrinsic definition
10849     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10850     SmallVector<EVT, 3> ValueVTs;
10851     ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
10852     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
10853 
10854     // There is always a chain and a glue type at the end
10855     ValueVTs.push_back(MVT::Other);
10856     ValueVTs.push_back(MVT::Glue);
10857     NodeTys = DAG.getVTList(ValueVTs);
10858   } else
10859     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10860 
10861   // Replace the target specific call node with a PATCHPOINT node.
10862   SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);
10863 
10864   // Update the NodeMap.
10865   if (HasDef) {
10866     if (IsAnyRegCC)
10867       setValue(&CB, SDValue(PPV.getNode(), 0));
10868     else
10869       setValue(&CB, Result.first);
10870   }
10871 
10872   // Fix up the consumers of the intrinsic. The chain and glue may be used in the
10873   // call sequence. Furthermore, the location of the chain and glue can change
10874   // when the AnyReg calling convention is used and the intrinsic returns a
10875   // value.
10876   if (IsAnyRegCC && HasDef) {
10877     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
10878     SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
10879     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10880   } else
10881     DAG.ReplaceAllUsesWith(Call, PPV.getNode());
10882   DAG.DeleteNode(Call);
10883 
10884   // Inform the Frame Information that we have a patchpoint in this function.
10885   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10886 }
10887 
10888 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
10889                                             unsigned Intrinsic) {
10890   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10891   SDValue Op1 = getValue(I.getArgOperand(0));
10892   SDValue Op2;
10893   if (I.arg_size() > 1)
10894     Op2 = getValue(I.getArgOperand(1));
10895   SDLoc dl = getCurSDLoc();
10896   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
10897   SDValue Res;
10898   SDNodeFlags SDFlags;
10899   if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
10900     SDFlags.copyFMF(*FPMO);
10901 
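  // For the fadd/fmul reductions, Op1 is the scalar start value and Op2 the
  // vector operand; when reassociation is allowed the reduction can be done
  // unordered and the start value folded in afterwards, otherwise the
  // sequential (ordered) node is used.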
10902   switch (Intrinsic) {
10903   case Intrinsic::vector_reduce_fadd:
10904     if (SDFlags.hasAllowReassociation())
10905       Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
10906                         DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10907                         SDFlags);
10908     else
10909       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10910     break;
10911   case Intrinsic::vector_reduce_fmul:
10912     if (SDFlags.hasAllowReassociation())
10913       Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
10914                         DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10915                         SDFlags);
10916     else
10917       Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10918     break;
10919   case Intrinsic::vector_reduce_add:
10920     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10921     break;
10922   case Intrinsic::vector_reduce_mul:
10923     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10924     break;
10925   case Intrinsic::vector_reduce_and:
10926     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10927     break;
10928   case Intrinsic::vector_reduce_or:
10929     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10930     break;
10931   case Intrinsic::vector_reduce_xor:
10932     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10933     break;
10934   case Intrinsic::vector_reduce_smax:
10935     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10936     break;
10937   case Intrinsic::vector_reduce_smin:
10938     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10939     break;
10940   case Intrinsic::vector_reduce_umax:
10941     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10942     break;
10943   case Intrinsic::vector_reduce_umin:
10944     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10945     break;
10946   case Intrinsic::vector_reduce_fmax:
10947     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10948     break;
10949   case Intrinsic::vector_reduce_fmin:
10950     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10951     break;
10952   case Intrinsic::vector_reduce_fmaximum:
10953     Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
10954     break;
10955   case Intrinsic::vector_reduce_fminimum:
10956     Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
10957     break;
10958   default:
10959     llvm_unreachable("Unhandled vector reduce intrinsic");
10960   }
10961   setValue(&I, Res);
10962 }
10963 
10964 /// Returns an AttributeList representing the attributes applied to the return
10965 /// value of the given call.
10966 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
10967   SmallVector<Attribute::AttrKind, 2> Attrs;
10968   if (CLI.RetSExt)
10969     Attrs.push_back(Attribute::SExt);
10970   if (CLI.RetZExt)
10971     Attrs.push_back(Attribute::ZExt);
10972   if (CLI.IsInReg)
10973     Attrs.push_back(Attribute::InReg);
10974 
10975   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
10976                             Attrs);
10977 }
10978 
10979 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
10980 /// implementation, which just calls LowerCall.
10981 /// FIXME: When all targets are
10982 /// migrated to using LowerCall, this hook should be integrated into SDISel.
10983 std::pair<SDValue, SDValue>
10984 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
10985   // Handle the incoming return values from the call.
10986   CLI.Ins.clear();
10987   Type *OrigRetTy = CLI.RetTy;
10988   SmallVector<EVT, 4> RetTys;
10989   SmallVector<TypeSize, 4> Offsets;
10990   auto &DL = CLI.DAG.getDataLayout();
10991   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
10992 
10993   if (CLI.IsPostTypeLegalization) {
10994     // If we are lowering a libcall after legalization, split the return type.
10995     SmallVector<EVT, 4> OldRetTys;
10996     SmallVector<TypeSize, 4> OldOffsets;
10997     RetTys.swap(OldRetTys);
10998     Offsets.swap(OldOffsets);
10999 
11000     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
11001       EVT RetVT = OldRetTys[i];
11002       uint64_t Offset = OldOffsets[i];
11003       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
11004       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
11005       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
11006       RetTys.append(NumRegs, RegisterVT);
11007       for (unsigned j = 0; j != NumRegs; ++j)
11008         Offsets.push_back(TypeSize::getFixed(Offset + j * RegisterVTByteSZ));
11009     }
11010   }
11011 
11012   SmallVector<ISD::OutputArg, 4> Outs;
11013   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
11014 
11015   bool CanLowerReturn =
11016       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
11017                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
11018 
11019   SDValue DemoteStackSlot;
11020   int DemoteStackIdx = -100;
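  // If the target cannot lower the return value in registers, demote it to an
  // sret-style hidden pointer argument backed by a fresh stack slot.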
11021   if (!CanLowerReturn) {
11022     // FIXME: equivalent assert?
11023     // assert(!CS.hasInAllocaArgument() &&
11024     //        "sret demotion is incompatible with inalloca");
11025     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
11026     Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
11027     MachineFunction &MF = CLI.DAG.getMachineFunction();
11028     DemoteStackIdx =
11029         MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
11030     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
11031                                               DL.getAllocaAddrSpace());
11032 
11033     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
11034     ArgListEntry Entry;
11035     Entry.Node = DemoteStackSlot;
11036     Entry.Ty = StackSlotPtrType;
11037     Entry.IsSExt = false;
11038     Entry.IsZExt = false;
11039     Entry.IsInReg = false;
11040     Entry.IsSRet = true;
11041     Entry.IsNest = false;
11042     Entry.IsByVal = false;
11043     Entry.IsByRef = false;
11044     Entry.IsReturned = false;
11045     Entry.IsSwiftSelf = false;
11046     Entry.IsSwiftAsync = false;
11047     Entry.IsSwiftError = false;
11048     Entry.IsCFGuardTarget = false;
11049     Entry.Alignment = Alignment;
11050     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
11051     CLI.NumFixedArgs += 1;
11052     CLI.getArgs()[0].IndirectType = CLI.RetTy;
11053     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
11054 
11055     // sret demotion isn't compatible with tail-calls, since the sret argument
11056     // points into the caller's stack frame.
11057     CLI.IsTailCall = false;
11058   } else {
11059     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
11060         CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
11061     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
11062       ISD::ArgFlagsTy Flags;
11063       if (NeedsRegBlock) {
11064         Flags.setInConsecutiveRegs();
11065         if (I == RetTys.size() - 1)
11066           Flags.setInConsecutiveRegsLast();
11067       }
11068       EVT VT = RetTys[I];
11069       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
11070                                                      CLI.CallConv, VT);
11071       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
11072                                                        CLI.CallConv, VT);
11073       for (unsigned i = 0; i != NumRegs; ++i) {
11074         ISD::InputArg MyFlags;
11075         MyFlags.Flags = Flags;
11076         MyFlags.VT = RegisterVT;
11077         MyFlags.ArgVT = VT;
11078         MyFlags.Used = CLI.IsReturnValueUsed;
11079         if (CLI.RetTy->isPointerTy()) {
11080           MyFlags.Flags.setPointer();
11081           MyFlags.Flags.setPointerAddrSpace(
11082               cast<PointerType>(CLI.RetTy)->getAddressSpace());
11083         }
11084         if (CLI.RetSExt)
11085           MyFlags.Flags.setSExt();
11086         if (CLI.RetZExt)
11087           MyFlags.Flags.setZExt();
11088         if (CLI.IsInReg)
11089           MyFlags.Flags.setInReg();
11090         CLI.Ins.push_back(MyFlags);
11091       }
11092     }
11093   }
11094 
11095   // We push in swifterror return as the last element of CLI.Ins.
11096   ArgListTy &Args = CLI.getArgs();
11097   if (supportSwiftError()) {
11098     for (const ArgListEntry &Arg : Args) {
11099       if (Arg.IsSwiftError) {
11100         ISD::InputArg MyFlags;
11101         MyFlags.VT = getPointerTy(DL);
11102         MyFlags.ArgVT = EVT(getPointerTy(DL));
11103         MyFlags.Flags.setSwiftError();
11104         CLI.Ins.push_back(MyFlags);
11105       }
11106     }
11107   }
11108 
11109   // Handle all of the outgoing arguments.
11110   CLI.Outs.clear();
11111   CLI.OutVals.clear();
11112   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
11113     SmallVector<EVT, 4> ValueVTs;
11114     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
11115     // FIXME: Split arguments if CLI.IsPostTypeLegalization
11116     Type *FinalType = Args[i].Ty;
11117     if (Args[i].IsByVal)
11118       FinalType = Args[i].IndirectType;
11119     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
11120         FinalType, CLI.CallConv, CLI.IsVarArg, DL);
11121     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
11122          ++Value) {
11123       EVT VT = ValueVTs[Value];
11124       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
11125       SDValue Op = SDValue(Args[i].Node.getNode(),
11126                            Args[i].Node.getResNo() + Value);
11127       ISD::ArgFlagsTy Flags;
11128 
11129       // Certain targets (such as MIPS) may have a different ABI alignment
11130       // for a type depending on the context. Give the target a chance to
11131       // specify the alignment it wants.
11132       const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
11133       Flags.setOrigAlign(OriginalAlignment);
11134 
11135       if (Args[i].Ty->isPointerTy()) {
11136         Flags.setPointer();
11137         Flags.setPointerAddrSpace(
11138             cast<PointerType>(Args[i].Ty)->getAddressSpace());
11139       }
11140       if (Args[i].IsZExt)
11141         Flags.setZExt();
11142       if (Args[i].IsSExt)
11143         Flags.setSExt();
11144       if (Args[i].IsNoExt)
11145         Flags.setNoExt();
11146       if (Args[i].IsInReg) {
11147         // If we are using vectorcall calling convention, a structure that is
11148         // passed InReg - is surely an HVA
11149         if (CLI.CallConv == CallingConv::X86_VectorCall &&
11150             isa<StructType>(FinalType)) {
11151           // The first value of a structure is marked
11152           if (0 == Value)
11153             Flags.setHvaStart();
11154           Flags.setHva();
11155         }
11156         // Set InReg Flag
11157         Flags.setInReg();
11158       }
11159       if (Args[i].IsSRet)
11160         Flags.setSRet();
11161       if (Args[i].IsSwiftSelf)
11162         Flags.setSwiftSelf();
11163       if (Args[i].IsSwiftAsync)
11164         Flags.setSwiftAsync();
11165       if (Args[i].IsSwiftError)
11166         Flags.setSwiftError();
11167       if (Args[i].IsCFGuardTarget)
11168         Flags.setCFGuardTarget();
11169       if (Args[i].IsByVal)
11170         Flags.setByVal();
11171       if (Args[i].IsByRef)
11172         Flags.setByRef();
11173       if (Args[i].IsPreallocated) {
11174         Flags.setPreallocated();
11175         // Set the byval flag for CCAssignFn callbacks that don't know about
11176         // preallocated.  This way we can know how many bytes we should've
11177         // allocated and how many bytes a callee cleanup function will pop.  If
11178         // we port preallocated to more targets, we'll have to add custom
11179         // preallocated handling in the various CC lowering callbacks.
11180         Flags.setByVal();
11181       }
11182       if (Args[i].IsInAlloca) {
11183         Flags.setInAlloca();
11184         // Set the byval flag for CCAssignFn callbacks that don't know about
11185         // inalloca.  This way we can know how many bytes we should've allocated
11186         // and how many bytes a callee cleanup function will pop.  If we port
11187         // inalloca to more targets, we'll have to add custom inalloca handling
11188         // in the various CC lowering callbacks.
11189         Flags.setByVal();
11190       }
11191       Align MemAlign;
11192       if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11193         unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
11194         Flags.setByValSize(FrameSize);
11195 
11196         // info is not there but there are cases it cannot get right.
11197         if (auto MA = Args[i].Alignment)
11198           MemAlign = *MA;
11199         else
11200           MemAlign = getByValTypeAlignment(Args[i].IndirectType, DL);
11201       } else if (auto MA = Args[i].Alignment) {
11202         MemAlign = *MA;
11203       } else {
11204         MemAlign = OriginalAlignment;
11205       }
11206       Flags.setMemAlign(MemAlign);
11207       if (Args[i].IsNest)
11208         Flags.setNest();
11209       if (NeedsRegBlock)
11210         Flags.setInConsecutiveRegs();
11211 
11212       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
11213                                                  CLI.CallConv, VT);
11214       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
11215                                                         CLI.CallConv, VT);
11216       SmallVector<SDValue, 4> Parts(NumParts);
11217       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
11218 
11219       if (Args[i].IsSExt)
11220         ExtendKind = ISD::SIGN_EXTEND;
11221       else if (Args[i].IsZExt)
11222         ExtendKind = ISD::ZERO_EXTEND;
11223 
11224       // Conservatively only handle 'returned' on non-vectors that can be lowered,
11225       // for now.
11226       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
11227           CanLowerReturn) {
11228         assert((CLI.RetTy == Args[i].Ty ||
11229                 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
11230                  CLI.RetTy->getPointerAddressSpace() ==
11231                      Args[i].Ty->getPointerAddressSpace())) &&
11232                RetTys.size() == NumValues && "unexpected use of 'returned'");
11233         // Before passing 'returned' to the target lowering code, ensure that
11234         // either the register MVT and the actual EVT are the same size or that
11235         // the return value and argument are extended in the same way; in these
11236         // cases it's safe to pass the argument register value unchanged as the
11237         // return register value (although it's at the target's option whether
11238         // to do so)
11239         // TODO: allow code generation to take advantage of partially preserved
11240         // registers rather than clobbering the entire register when the
11241         // parameter extension method is not compatible with the return
11242         // extension method
11243         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
11244             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
11245              CLI.RetZExt == Args[i].IsZExt))
11246           Flags.setReturned();
11247       }
11248 
11249       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
11250                      CLI.CallConv, ExtendKind);
11251 
11252       for (unsigned j = 0; j != NumParts; ++j) {
11253         // If it isn't the first piece, the alignment must be 1.
11254         // For scalable vectors the scalable part is currently handled
11255         // by individual targets, so we just use the known minimum size here.
11256         ISD::OutputArg MyFlags(
11257             Flags, Parts[j].getValueType().getSimpleVT(), VT,
11258             i < CLI.NumFixedArgs, i,
11259             j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
11260         if (NumParts > 1 && j == 0)
11261           MyFlags.Flags.setSplit();
11262         else if (j != 0) {
11263           MyFlags.Flags.setOrigAlign(Align(1));
11264           if (j == NumParts - 1)
11265             MyFlags.Flags.setSplitEnd();
11266         }
11267 
11268         CLI.Outs.push_back(MyFlags);
11269         CLI.OutVals.push_back(Parts[j]);
11270       }
11271 
11272       if (NeedsRegBlock && Value == NumValues - 1)
11273         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11274     }
11275   }
11276 
11277   SmallVector<SDValue, 4> InVals;
11278   CLI.Chain = LowerCall(CLI, InVals);
11279 
11280   // Update CLI.InVals to use outside of this function.
11281   CLI.InVals = InVals;
11282 
11283   // Verify that the target's LowerCall behaved as expected.
11284   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
11285          "LowerCall didn't return a valid chain!");
11286   assert((!CLI.IsTailCall || InVals.empty()) &&
11287          "LowerCall emitted a return value for a tail call!");
11288   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
11289          "LowerCall didn't emit the correct number of values!");
11290 
11291   // For a tail call, the return value is merely live-out and there aren't
11292   // any nodes in the DAG representing it. Return a special value to
11293   // indicate that a tail call has been emitted and no more Instructions
11294   // should be processed in the current block.
11295   if (CLI.IsTailCall) {
11296     CLI.DAG.setRoot(CLI.Chain);
11297     return std::make_pair(SDValue(), SDValue());
11298   }
11299 
11300 #ifndef NDEBUG
11301   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
11302     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
11303     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
11304            "LowerCall emitted a value with the wrong type!");
11305   }
11306 #endif
11307 
11308   SmallVector<SDValue, 4> ReturnValues;
11309   if (!CanLowerReturn) {
11310     // The instruction result is the result of loading from the
11311     // hidden sret parameter.
11312     SmallVector<EVT, 1> PVTs;
11313     Type *PtrRetTy =
11314         PointerType::get(OrigRetTy->getContext(), DL.getAllocaAddrSpace());
11315 
11316     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
11317     assert(PVTs.size() == 1 && "Pointers should fit in one register");
11318     EVT PtrVT = PVTs[0];
11319 
11320     unsigned NumValues = RetTys.size();
11321     ReturnValues.resize(NumValues);
11322     SmallVector<SDValue, 4> Chains(NumValues);
11323 
11324     // An aggregate return value cannot wrap around the address space, so
11325     // offsets to its parts don't wrap either.
11326     MachineFunction &MF = CLI.DAG.getMachineFunction();
11327     Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
11328     for (unsigned i = 0; i < NumValues; ++i) {
11329       SDValue Add =
11330           CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
11331                           CLI.DAG.getConstant(Offsets[i], CLI.DL, PtrVT),
11332                           SDNodeFlags::NoUnsignedWrap);
11333       SDValue L = CLI.DAG.getLoad(
11334           RetTys[i], CLI.DL, CLI.Chain, Add,
11335           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
11336                                             DemoteStackIdx, Offsets[i]),
11337           HiddenSRetAlign);
11338       ReturnValues[i] = L;
11339       Chains[i] = L.getValue(1);
11340     }
11341 
11342     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
11343   } else {
11344     // Collect the legal value parts into potentially illegal values
11345     // that correspond to the original function's return values.
11346     std::optional<ISD::NodeType> AssertOp;
11347     if (CLI.RetSExt)
11348       AssertOp = ISD::AssertSext;
11349     else if (CLI.RetZExt)
11350       AssertOp = ISD::AssertZext;
11351     unsigned CurReg = 0;
11352     for (EVT VT : RetTys) {
11353       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
11354                                                      CLI.CallConv, VT);
11355       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
11356                                                        CLI.CallConv, VT);
11357 
11358       ReturnValues.push_back(getCopyFromParts(
11359           CLI.DAG, CLI.DL, &InVals[CurReg], NumRegs, RegisterVT, VT, nullptr,
11360           CLI.Chain, CLI.CallConv, AssertOp));
11361       CurReg += NumRegs;
11362     }
11363 
11364     // For a function returning void, there is no return value. We can't create
11365     // such a node, so we just return a null return value; nothing will actually
11366     // look at it.
11367     if (ReturnValues.empty())
11368       return std::make_pair(SDValue(), CLI.Chain);
11369   }
11370 
11371   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
11372                                 CLI.DAG.getVTList(RetTys), ReturnValues);
11373   return std::make_pair(Res, CLI.Chain);
11374 }
11375 
11376 /// Places new result values for the node in Results (their number
11377 /// and types must exactly match those of the original return values of
11378 /// the node), or leaves Results empty, which indicates that the node is not
11379 /// to be custom lowered after all.
11380 void TargetLowering::LowerOperationWrapper(SDNode *N,
11381                                            SmallVectorImpl<SDValue> &Results,
11382                                            SelectionDAG &DAG) const {
11383   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
11384 
11385   if (!Res.getNode())
11386     return;
11387 
11388   // If the original node has one result, take the return value from
11389   // LowerOperation as is. It might not be result number 0.
11390   if (N->getNumValues() == 1) {
11391     Results.push_back(Res);
11392     return;
11393   }
11394 
11395   // If the original node has multiple results, then the return node should
11396   // have the same number of results.
11397   assert((N->getNumValues() == Res->getNumValues()) &&
11398       "Lowering returned the wrong number of results!");
11399 
11400   // Places new result values base on N result number.
11401   // Place the new result values based on N's result numbers.
11402     Results.push_back(Res.getValue(I));
11403 }
11404 
11405 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
11406   llvm_unreachable("LowerOperation not implemented for this target!");
11407 }
11408 
11409 void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V,
11410                                                      unsigned Reg,
11411                                                      ISD::NodeType ExtendType) {
11412   SDValue Op = getNonRegisterValue(V);
11413   assert((Op.getOpcode() != ISD::CopyFromReg ||
11414           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
11415          "Copy from a reg to the same reg!");
11416   assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
11417 
11418   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11419   // If this is an InlineAsm we have to match the registers required, not the
11420   // notional registers required by the type.
11421 
11422   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
11423                    std::nullopt); // This is not an ABI copy.
11424   SDValue Chain = DAG.getEntryNode();
11425 
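        // If the caller asked for a plain any-extend, prefer the extension kind
        // recorded for V in FuncInfo.PreferredExtendType so every copy of V into a
        // virtual register is extended the same way.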
11426   if (ExtendType == ISD::ANY_EXTEND) {
11427     auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
11428     if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
11429       ExtendType = PreferredExtendIt->second;
11430   }
11431   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
11432   PendingExports.push_back(Chain);
11433 }
11434 
11435 #include "llvm/CodeGen/SelectionDAGISel.h"
11436 
11437 /// isOnlyUsedInEntryBlock - Return true if the specified argument is only used
11438 /// in the entry block.  Uses by switches do not count as entry-block uses,
11439 /// since the switch may expand into multiple basic blocks.
11440 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
11441   // With FastISel active, we may be splitting blocks, so force creation
11442   // of virtual registers for all non-dead arguments.
11443   if (FastISel)
11444     return A->use_empty();
11445 
11446   const BasicBlock &Entry = A->getParent()->front();
11447   for (const User *U : A->users())
11448     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
11449       return false;  // Use not in entry block.
11450 
11451   return true;
11452 }
11453 
11454 using ArgCopyElisionMapTy =
11455     DenseMap<const Argument *,
11456              std::pair<const AllocaInst *, const StoreInst *>>;
11457 
11458 /// Scan the entry block of the function in FuncInfo for arguments that look
11459 /// like copies into a local alloca. Record any copied arguments in
11460 /// ArgCopyElisionCandidates.
11461 static void
11462 findArgumentCopyElisionCandidates(const DataLayout &DL,
11463                                   FunctionLoweringInfo *FuncInfo,
11464                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
11465   // Record the state of every static alloca used in the entry block. Argument
11466   // allocas are all used in the entry block, so we need approximately as many
11467   // entries as we have arguments.
11468   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
11469   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
11470   unsigned NumArgs = FuncInfo->Fn->arg_size();
11471   StaticAllocas.reserve(NumArgs * 2);
11472 
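        // If V (after stripping pointer casts) is a static alloca with a frame
        // index, return a pointer to its tracked state, creating an Unknown entry
        // on first sight; otherwise return null.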
11473   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
11474     if (!V)
11475       return nullptr;
11476     V = V->stripPointerCasts();
11477     const auto *AI = dyn_cast<AllocaInst>(V);
11478     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
11479       return nullptr;
11480     auto Iter = StaticAllocas.insert({AI, Unknown});
11481     return &Iter.first->second;
11482   };
11483 
11484   // Look for stores of arguments to static allocas. Look through bitcasts and
11485   // GEPs to handle type coercions, as long as the alloca is fully initialized
11486   // by the store. Any non-store use of an alloca escapes it and any subsequent
11487   // unanalyzed store might write it.
11488   // FIXME: Handle structs initialized with multiple stores.
11489   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
11490     // Look for stores, and handle non-store uses conservatively.
11491     const auto *SI = dyn_cast<StoreInst>(&I);
11492     if (!SI) {
11493       // We will look through cast uses, so ignore them completely.
11494       if (I.isCast())
11495         continue;
11496       // Ignore debug info and pseudo op intrinsics, they don't escape or store
11497       // to allocas.
11498       if (I.isDebugOrPseudoInst())
11499         continue;
11500       // This is an unknown instruction. Assume it escapes or writes to all
11501       // static alloca operands.
11502       for (const Use &U : I.operands()) {
11503         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
11504           *Info = StaticAllocaInfo::Clobbered;
11505       }
11506       continue;
11507     }
11508 
11509     // If the stored value is a static alloca, mark it as escaped.
11510     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
11511       *Info = StaticAllocaInfo::Clobbered;
11512 
11513     // Check if the destination is a static alloca.
11514     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
11515     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
11516     if (!Info)
11517       continue;
11518     const AllocaInst *AI = cast<AllocaInst>(Dst);
11519 
11520     // Skip allocas that have been initialized or clobbered.
11521     if (*Info != StaticAllocaInfo::Unknown)
11522       continue;
11523 
11524     // Check if the stored value is an argument, and that this store fully
11525     // initializes the alloca.
11526     // If the argument type has padding bits we can't directly forward a pointer
11527     // as the upper bits may contain garbage.
11528     // Don't elide copies from the same argument twice.
11529     const Value *Val = SI->getValueOperand()->stripPointerCasts();
11530     const auto *Arg = dyn_cast<Argument>(Val);
11531     if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11532         Arg->getType()->isEmptyTy() ||
11533         DL.getTypeStoreSize(Arg->getType()) !=
11534             DL.getTypeAllocSize(AI->getAllocatedType()) ||
11535         !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
11536         ArgCopyElisionCandidates.count(Arg)) {
11537       *Info = StaticAllocaInfo::Clobbered;
11538       continue;
11539     }
11540 
11541     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
11542                       << '\n');
11543 
11544     // Mark this alloca and store for argument copy elision.
11545     *Info = StaticAllocaInfo::Elidable;
11546     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
11547 
11548     // Stop scanning if we've seen all arguments. This will happen early in -O0
11549     // builds, which is useful, because -O0 builds have large entry blocks and
11550     // many allocas.
11551     if (ArgCopyElisionCandidates.size() == NumArgs)
11552       break;
11553   }
11554 }
11555 
11556 /// Try to elide argument copies from memory into a local alloca. Succeeds if
11557 /// ArgVal is a load from a suitable fixed stack object.
11558 static void tryToElideArgumentCopy(
11559     FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
11560     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
11561     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
11562     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
11563     ArrayRef<SDValue> ArgVals, bool &ArgHasUses) {
11564   // Check if this is a load from a fixed stack object.
11565   auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
11566   if (!LNode)
11567     return;
11568   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
11569   if (!FINode)
11570     return;
11571 
11572   // Check that the fixed stack object is the right size and alignment.
11573   // Look at the alignment that the user wrote on the alloca instead of looking
11574   // at the stack object.
11575   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11576   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11577   const AllocaInst *AI = ArgCopyIter->second.first;
11578   int FixedIndex = FINode->getIndex();
11579   int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
11580   int OldIndex = AllocaIndex;
11581   MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
11582   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
11583     LLVM_DEBUG(
11584         dbgs() << "  argument copy elision failed due to bad fixed stack "
11585                   "object size\n");
11586     return;
11587   }
11588   Align RequiredAlignment = AI->getAlign();
11589   if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
11590     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
11591                          "greater than stack argument alignment ("
11592                       << DebugStr(RequiredAlignment) << " vs "
11593                       << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
11594     return;
11595   }
11596 
11597   // Perform the elision. Delete the old stack object and replace its only use
11598   // in the variable info map. Mark the stack object as mutable and aliased.
11599   LLVM_DEBUG({
11600     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
11601            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
11602            << '\n';
11603   });
11604   MFI.RemoveStackObject(OldIndex);
11605   MFI.setIsImmutableObjectIndex(FixedIndex, false);
11606   MFI.setIsAliasedObjectIndex(FixedIndex, true);
11607   AllocaIndex = FixedIndex;
11608   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
11609   for (SDValue ArgVal : ArgVals)
11610     Chains.push_back(ArgVal.getValue(1));
11611 
11612   // Avoid emitting code for the store implementing the copy.
11613   const StoreInst *SI = ArgCopyIter->second.second;
11614   ElidedArgCopyInstrs.insert(SI);
11615 
11616   // Check for uses of the argument again so that we can avoid exporting ArgVal
11617   // if it isn't used by anything other than the store.
11618   for (const Value *U : Arg.users()) {
11619     if (U != SI) {
11620       ArgHasUses = true;
11621       break;
11622     }
11623   }
11624 }
11625 
11626 void SelectionDAGISel::LowerArguments(const Function &F) {
11627   SelectionDAG &DAG = SDB->DAG;
11628   SDLoc dl = SDB->getCurSDLoc();
11629   const DataLayout &DL = DAG.getDataLayout();
11630   SmallVector<ISD::InputArg, 16> Ins;
11631 
11632   // In Naked functions we aren't going to save any registers.
11633   if (F.hasFnAttribute(Attribute::Naked))
11634     return;
11635 
11636   if (!FuncInfo->CanLowerReturn) {
11637     // Put in an sret pointer parameter before all the other parameters.
11638     SmallVector<EVT, 1> ValueVTs;
11639     ComputeValueVTs(*TLI, DAG.getDataLayout(),
11640                     PointerType::get(F.getContext(),
11641                                      DAG.getDataLayout().getAllocaAddrSpace()),
11642                     ValueVTs);
11643 
11644     // NOTE: Assuming that a pointer will never break down to more than one VT
11645     // or one register.
11646     ISD::ArgFlagsTy Flags;
11647     Flags.setSRet();
11648     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
11649     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
11650                          ISD::InputArg::NoArgIndex, 0);
11651     Ins.push_back(RetArg);
11652   }
11653 
11654   // Look for stores of arguments to static allocas. Mark such arguments with a
11655   // flag to ask the target to give us the memory location of that argument if
11656   // available.
11657   ArgCopyElisionMapTy ArgCopyElisionCandidates;
11658   findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
11659                                     ArgCopyElisionCandidates);
11660 
11661   // Set up the incoming argument description vector.
11662   for (const Argument &Arg : F.args()) {
11663     unsigned ArgNo = Arg.getArgNo();
11664     SmallVector<EVT, 4> ValueVTs;
11665     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11666     bool isArgValueUsed = !Arg.use_empty();
11667     unsigned PartBase = 0;
11668     Type *FinalType = Arg.getType();
11669     if (Arg.hasAttribute(Attribute::ByVal))
11670       FinalType = Arg.getParamByValType();
11671     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
11672         FinalType, F.getCallingConv(), F.isVarArg(), DL);
11673     for (unsigned Value = 0, NumValues = ValueVTs.size();
11674          Value != NumValues; ++Value) {
11675       EVT VT = ValueVTs[Value];
11676       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
11677       ISD::ArgFlagsTy Flags;
11678 
11680       if (Arg.getType()->isPointerTy()) {
11681         Flags.setPointer();
11682         Flags.setPointerAddrSpace(
11683             cast<PointerType>(Arg.getType())->getAddressSpace());
11684       }
11685       if (Arg.hasAttribute(Attribute::ZExt))
11686         Flags.setZExt();
11687       if (Arg.hasAttribute(Attribute::SExt))
11688         Flags.setSExt();
11689       if (Arg.hasAttribute(Attribute::InReg)) {
11690         // If we are using the vectorcall calling convention, a structure that
11691         // is passed InReg is surely an HVA (homogeneous vector aggregate).
11692         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
11693             isa<StructType>(Arg.getType())) {
11694           // The first value of the structure is marked as the HVA start.
11695           if (0 == Value)
11696             Flags.setHvaStart();
11697           Flags.setHva();
11698         }
11699         // Set InReg Flag
11700         Flags.setInReg();
11701       }
11702       if (Arg.hasAttribute(Attribute::StructRet))
11703         Flags.setSRet();
11704       if (Arg.hasAttribute(Attribute::SwiftSelf))
11705         Flags.setSwiftSelf();
11706       if (Arg.hasAttribute(Attribute::SwiftAsync))
11707         Flags.setSwiftAsync();
11708       if (Arg.hasAttribute(Attribute::SwiftError))
11709         Flags.setSwiftError();
11710       if (Arg.hasAttribute(Attribute::ByVal))
11711         Flags.setByVal();
11712       if (Arg.hasAttribute(Attribute::ByRef))
11713         Flags.setByRef();
11714       if (Arg.hasAttribute(Attribute::InAlloca)) {
11715         Flags.setInAlloca();
11716         // Set the byval flag for CCAssignFn callbacks that don't know about
11717         // inalloca.  This way we can know how many bytes we should've allocated
11718         // and how many bytes a callee cleanup function will pop.  If we port
11719         // inalloca to more targets, we'll have to add custom inalloca handling
11720         // in the various CC lowering callbacks.
11721         Flags.setByVal();
11722       }
11723       if (Arg.hasAttribute(Attribute::Preallocated)) {
11724         Flags.setPreallocated();
11725         // Set the byval flag for CCAssignFn callbacks that don't know about
11726         // preallocated.  This way we can know how many bytes we should've
11727         // allocated and how many bytes a callee cleanup function will pop.  If
11728         // we port preallocated to more targets, we'll have to add custom
11729         // preallocated handling in the various CC lowering callbacks.
11730         Flags.setByVal();
11731       }
11732 
11733       // Certain targets (such as MIPS), may have a different ABI alignment
11734       // for a type depending on the context. Give the target a chance to
11735       // specify the alignment it wants.
11736       const Align OriginalAlignment(
11737           TLI->getABIAlignmentForCallingConv(ArgTy, DL));
11738       Flags.setOrigAlign(OriginalAlignment);
11739 
11740       Align MemAlign;
11741       Type *ArgMemTy = nullptr;
11742       if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
11743           Flags.isByRef()) {
11744         if (!ArgMemTy)
11745           ArgMemTy = Arg.getPointeeInMemoryValueType();
11746 
11747         uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
11748 
11749         // For in-memory arguments, the size and alignment should be provided by
11750         // the frontend.  The backend will guess if this info is missing, but
11751         // there are cases it cannot get right.
11752         if (auto ParamAlign = Arg.getParamStackAlign())
11753           MemAlign = *ParamAlign;
11754         else if ((ParamAlign = Arg.getParamAlign()))
11755           MemAlign = *ParamAlign;
11756         else
11757           MemAlign = TLI->getByValTypeAlignment(ArgMemTy, DL);
11758         if (Flags.isByRef())
11759           Flags.setByRefSize(MemSize);
11760         else
11761           Flags.setByValSize(MemSize);
11762       } else if (auto ParamAlign = Arg.getParamStackAlign()) {
11763         MemAlign = *ParamAlign;
11764       } else {
11765         MemAlign = OriginalAlignment;
11766       }
11767       Flags.setMemAlign(MemAlign);
11768 
11769       if (Arg.hasAttribute(Attribute::Nest))
11770         Flags.setNest();
11771       if (NeedsRegBlock)
11772         Flags.setInConsecutiveRegs();
11773       if (ArgCopyElisionCandidates.count(&Arg))
11774         Flags.setCopyElisionCandidate();
11775       if (Arg.hasAttribute(Attribute::Returned))
11776         Flags.setReturned();
11777 
11778       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
11779           *CurDAG->getContext(), F.getCallingConv(), VT);
11780       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
11781           *CurDAG->getContext(), F.getCallingConv(), VT);
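            // Emit one InputArg per register-sized part of this value; the first
            // and last parts of a split value are flagged so the target can
            // reassemble it.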
11782       for (unsigned i = 0; i != NumRegs; ++i) {
11783         // For scalable vectors, use the minimum size; individual targets
11784         // are responsible for handling scalable vector arguments and
11785         // return values.
11786         ISD::InputArg MyFlags(
11787             Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11788             PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
11789         if (NumRegs > 1 && i == 0)
11790           MyFlags.Flags.setSplit();
11791         // If it isn't the first piece, the alignment must be 1.
11792         else if (i > 0) {
11793           MyFlags.Flags.setOrigAlign(Align(1));
11794           if (i == NumRegs - 1)
11795             MyFlags.Flags.setSplitEnd();
11796         }
11797         Ins.push_back(MyFlags);
11798       }
11799       if (NeedsRegBlock && Value == NumValues - 1)
11800         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11801       PartBase += VT.getStoreSize().getKnownMinValue();
11802     }
11803   }
11804 
11805   // Call the target to set up the argument values.
11806   SmallVector<SDValue, 8> InVals;
11807   SDValue NewRoot = TLI->LowerFormalArguments(
11808       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
11809 
11810   // Verify that the target's LowerFormalArguments behaved as expected.
11811   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
11812          "LowerFormalArguments didn't return a valid chain!");
11813   assert(InVals.size() == Ins.size() &&
11814          "LowerFormalArguments didn't emit the correct number of values!");
11815   LLVM_DEBUG({
11816     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
11817       assert(InVals[i].getNode() &&
11818              "LowerFormalArguments emitted a null value!");
11819       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11820              "LowerFormalArguments emitted a value with the wrong type!");
11821     }
11822   });
11823 
11824   // Update the DAG with the new chain value resulting from argument lowering.
11825   DAG.setRoot(NewRoot);
11826 
11827   // Set up the argument values.
11828   unsigned i = 0;
11829   if (!FuncInfo->CanLowerReturn) {
11830     // Create a virtual register for the sret pointer, and put in a copy
11831     // from the sret argument into it.
11832     SmallVector<EVT, 1> ValueVTs;
11833     ComputeValueVTs(*TLI, DAG.getDataLayout(),
11834                     PointerType::get(F.getContext(),
11835                                      DAG.getDataLayout().getAllocaAddrSpace()),
11836                     ValueVTs);
11837     MVT VT = ValueVTs[0].getSimpleVT();
11838     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
11839     std::optional<ISD::NodeType> AssertOp;
11840     SDValue ArgValue =
11841         getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT, nullptr, NewRoot,
11842                          F.getCallingConv(), AssertOp);
11843 
11844     MachineFunction& MF = SDB->DAG.getMachineFunction();
11845     MachineRegisterInfo& RegInfo = MF.getRegInfo();
11846     Register SRetReg =
11847         RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
11848     FuncInfo->DemoteRegister = SRetReg;
11849     NewRoot =
11850         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
11851     DAG.setRoot(NewRoot);
11852 
11853     // i indexes lowered arguments.  Bump it past the hidden sret argument.
11854     ++i;
11855   }
11856 
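        // Chains of argument loads whose copying stores were elided; they are
        // token-factored into the root below so the loads are not dropped.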
11857   SmallVector<SDValue, 4> Chains;
11858   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11859   for (const Argument &Arg : F.args()) {
11860     SmallVector<SDValue, 4> ArgValues;
11861     SmallVector<EVT, 4> ValueVTs;
11862     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11863     unsigned NumValues = ValueVTs.size();
11864     if (NumValues == 0)
11865       continue;
11866 
11867     bool ArgHasUses = !Arg.use_empty();
11868 
11869     // Elide the copying store if the target loaded this argument from a
11870     // suitable fixed stack object.
11871     if (Ins[i].Flags.isCopyElisionCandidate()) {
11872       unsigned NumParts = 0;
11873       for (EVT VT : ValueVTs)
11874         NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
11875                                                        F.getCallingConv(), VT);
11876 
11877       tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
11878                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
11879                              ArrayRef(&InVals[i], NumParts), ArgHasUses);
11880     }
11881 
11882     // If this argument is unused, still remember its value; it is used to
11883     // generate debugging information.
11884     bool isSwiftErrorArg =
11885         TLI->supportSwiftError() &&
11886         Arg.hasAttribute(Attribute::SwiftError);
11887     if (!ArgHasUses && !isSwiftErrorArg) {
11888       SDB->setUnusedArgValue(&Arg, InVals[i]);
11889 
11890       // Also remember any frame index for use in FastISel.
11891       if (FrameIndexSDNode *FI =
11892           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11893         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11894     }
11895 
11896     for (unsigned Val = 0; Val != NumValues; ++Val) {
11897       EVT VT = ValueVTs[Val];
11898       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
11899                                                       F.getCallingConv(), VT);
11900       unsigned NumParts = TLI->getNumRegistersForCallingConv(
11901           *CurDAG->getContext(), F.getCallingConv(), VT);
11902 
11903       // Even an apparent 'unused' swifterror argument needs to be returned. So
11904       // we do generate a copy for it that can be used on return from the
11905       // function.
11906       if (ArgHasUses || isSwiftErrorArg) {
11907         std::optional<ISD::NodeType> AssertOp;
11908         if (Arg.hasAttribute(Attribute::SExt))
11909           AssertOp = ISD::AssertSext;
11910         else if (Arg.hasAttribute(Attribute::ZExt))
11911           AssertOp = ISD::AssertZext;
11912 
11913         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
11914                                              PartVT, VT, nullptr, NewRoot,
11915                                              F.getCallingConv(), AssertOp));
11916       }
11917 
11918       i += NumParts;
11919     }
11920 
11921     // We don't need to do anything else for unused arguments.
11922     if (ArgValues.empty())
11923       continue;
11924 
11925     // Note down frame index.
11926     if (FrameIndexSDNode *FI =
11927         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11928       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11929 
11930     SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
11931                                      SDB->getCurSDLoc());
11932 
11933     SDB->setValue(&Arg, Res);
11934     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
11935       // We want to associate the argument with the frame index, among
11936       // involved operands, that corresponds to the lowest address. The
11937       // getCopyFromParts function, called earlier, is swapping the order of
11938       // the operands to BUILD_PAIR depending on endianness. The result of
11939       // that swapping is that the least significant bits of the argument will
11940       // be in the first operand of the BUILD_PAIR node, and the most
11941       // significant bits will be in the second operand.
11942       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
11943       if (LoadSDNode *LNode =
11944           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
11945         if (FrameIndexSDNode *FI =
11946             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11947           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11948     }
11949 
11950     // Analyses past this point are naive and don't expect an assertion.
11951     if (Res.getOpcode() == ISD::AssertZext)
11952       Res = Res.getOperand(0);
11953 
11954     // Update the SwiftErrorVRegDefMap.
11955     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
11956       Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11957       if (Reg.isVirtual())
11958         SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
11959                                    Reg);
11960     }
11961 
11962     // If this argument is live outside of the entry block, insert a copy from
11963     // wherever we got it to the vreg that other BB's will reference it as.
11964     if (Res.getOpcode() == ISD::CopyFromReg) {
11965       // If we can, though, try to skip creating an unnecessary vreg.
11966       // FIXME: This isn't very clean... it would be nice to make this more
11967       // general.
11968       Register Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11969       if (Reg.isVirtual()) {
11970         FuncInfo->ValueMap[&Arg] = Reg;
11971         continue;
11972       }
11973     }
11974     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
11975       FuncInfo->InitializeRegForValue(&Arg);
11976       SDB->CopyToExportRegsIfNeeded(&Arg);
11977     }
11978   }
11979 
11980   if (!Chains.empty()) {
11981     Chains.push_back(NewRoot);
11982     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
11983   }
11984 
11985   DAG.setRoot(NewRoot);
11986 
11987   assert(i == InVals.size() && "Argument register count mismatch!");
11988 
11989   // If any argument copy elisions occurred and we have debug info, update the
11990   // stale frame indices used in the dbg.declare variable info table.
11991   if (!ArgCopyElisionFrameIndexMap.empty()) {
11992     for (MachineFunction::VariableDbgInfo &VI :
11993          MF->getInStackSlotVariableDbgInfo()) {
11994       auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
11995       if (I != ArgCopyElisionFrameIndexMap.end())
11996         VI.updateStackSlot(I->second);
11997     }
11998   }
11999 
12000   // Finally, if the target has anything special to do, allow it to do so.
12001   emitFunctionEntryCode();
12002 }
12003 
12004 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
12005 /// ensure constants are generated when needed.  Remember the virtual registers
12006 /// that need to be added to the Machine PHI nodes as input.  We cannot just
12007 /// directly add them, because expansion might result in multiple MBB's for one
12008 /// BB.  As such, the start of the BB might correspond to a different MBB than
12009 /// the end.
12010 void
12011 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
12012   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12013 
12014   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12015 
12016   // Check PHI nodes in successors that expect a value to be available from this
12017   // block.
12018   for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
12019     if (!isa<PHINode>(SuccBB->begin())) continue;
12020     MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);
12021 
12022     // If this terminator has multiple identical successors (common for
12023     // switches), only handle each succ once.
12024     if (!SuccsHandled.insert(SuccMBB).second)
12025       continue;
12026 
12027     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
12028 
12029     // At this point we know that there is a 1-1 correspondence between LLVM PHI
12030     // nodes and Machine PHI nodes, but the incoming operands have not been
12031     // emitted yet.
12032     for (const PHINode &PN : SuccBB->phis()) {
12033       // Ignore dead PHIs.
12034       if (PN.use_empty())
12035         continue;
12036 
12037       // Skip empty types
12038       if (PN.getType()->isEmptyTy())
12039         continue;
12040 
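            // Reg will be the first of the virtual registers holding the incoming
            // value for this block.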
12041       unsigned Reg;
12042       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12043 
12044       if (const auto *C = dyn_cast<Constant>(PHIOp)) {
12045         unsigned &RegOut = ConstantsOut[C];
12046         if (RegOut == 0) {
12047           RegOut = FuncInfo.CreateRegs(C);
12048           // We need to zero/sign extend ConstantInt phi operands to match
12049           // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
12050           ISD::NodeType ExtendType = ISD::ANY_EXTEND;
12051           if (auto *CI = dyn_cast<ConstantInt>(C))
12052             ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
12053                                                     : ISD::ZERO_EXTEND;
12054           CopyValueToVirtualRegister(C, RegOut, ExtendType);
12055         }
12056         Reg = RegOut;
12057       } else {
12058         DenseMap<const Value *, Register>::iterator I =
12059           FuncInfo.ValueMap.find(PHIOp);
12060         if (I != FuncInfo.ValueMap.end())
12061           Reg = I->second;
12062         else {
12063           assert(isa<AllocaInst>(PHIOp) &&
12064                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
12065                  "Didn't codegen value into a register!??");
12066           Reg = FuncInfo.CreateRegs(PHIOp);
12067           CopyValueToVirtualRegister(PHIOp, Reg);
12068         }
12069       }
12070 
12071       // Remember that this register needs to be added to the machine PHI node as
12072       // the input for this MBB.
12073       SmallVector<EVT, 4> ValueVTs;
12074       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
12075       for (EVT VT : ValueVTs) {
12076         const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
12077         for (unsigned i = 0; i != NumRegisters; ++i)
12078           FuncInfo.PHINodesToUpdate.push_back(
12079               std::make_pair(&*MBBI++, Reg + i));
12080         Reg += NumRegisters;
12081       }
12082     }
12083   }
12084 
12085   ConstantsOut.clear();
12086 }
12087 
12088 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
12089   MachineFunction::iterator I(MBB);
12090   if (++I == FuncInfo.MF->end())
12091     return nullptr;
12092   return &*I;
12093 }
12094 
12095 /// During lowering new call nodes can be created (such as memset, etc.).
12096 /// Those will become new roots of the current DAG, but complications arise
12097 /// when they are tail calls. In such cases, the call lowering will update
12098 /// the root, but the builder still needs to know that a tail call has been
12099 /// lowered in order to avoid generating an additional return.
12100 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
12101   // If the node is null, we do have a tail call.
12102   if (MaybeTC.getNode() != nullptr)
12103     DAG.setRoot(MaybeTC);
12104   else
12105     HasTailCall = true;
12106 }
12107 
12108 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
12109                                         MachineBasicBlock *SwitchMBB,
12110                                         MachineBasicBlock *DefaultMBB) {
12111   MachineFunction *CurMF = FuncInfo.MF;
12112   MachineBasicBlock *NextMBB = nullptr;
12113   MachineFunction::iterator BBI(W.MBB);
12114   if (++BBI != FuncInfo.MF->end())
12115     NextMBB = &*BBI;
12116 
12117   unsigned Size = W.LastCluster - W.FirstCluster + 1;
12118 
12119   BranchProbabilityInfo *BPI = FuncInfo.BPI;
12120 
12121   if (Size == 2 && W.MBB == SwitchMBB) {
12122     // If any two of the cases have the same destination, and if one value
12123     // is the same as the other, but has one bit unset that the other has set,
12124     // use bit manipulation to do two compares at once.  For example:
12125     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
12126     // TODO: This could be extended to merge any 2 cases in switches with 3
12127     // cases.
12128     // TODO: Handle cases where W.CaseBB != SwitchBB.
12129     CaseCluster &Small = *W.FirstCluster;
12130     CaseCluster &Big = *W.LastCluster;
12131 
12132     if (Small.Low == Small.High && Big.Low == Big.High &&
12133         Small.MBB == Big.MBB) {
12134       const APInt &SmallValue = Small.Low->getValue();
12135       const APInt &BigValue = Big.Low->getValue();
12136 
12137       // Check that there is only one bit different.
12138       APInt CommonBit = BigValue ^ SmallValue;
12139       if (CommonBit.isPowerOf2()) {
12140         SDValue CondLHS = getValue(Cond);
12141         EVT VT = CondLHS.getValueType();
12142         SDLoc DL = getCurSDLoc();
12143 
12144         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
12145                                  DAG.getConstant(CommonBit, DL, VT));
12146         SDValue Cond = DAG.getSetCC(
12147             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
12148             ISD::SETEQ);
12149 
12150         // Update successor info.
12151         // Both Small and Big will jump to Small.BB, so we sum up the
12152         // probabilities.
12153         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
12154         if (BPI)
12155           addSuccessorWithProb(
12156               SwitchMBB, DefaultMBB,
12157               // The default destination is the first successor in IR.
12158               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
12159         else
12160           addSuccessorWithProb(SwitchMBB, DefaultMBB);
12161 
12162         // Insert the true branch.
12163         SDValue BrCond =
12164             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
12165                         DAG.getBasicBlock(Small.MBB));
12166         // Insert the false branch.
12167         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
12168                              DAG.getBasicBlock(DefaultMBB));
12169 
12170         DAG.setRoot(BrCond);
12171         return;
12172       }
12173     }
12174   }
12175 
12176   if (TM.getOptLevel() != CodeGenOptLevel::None) {
12177     // Here, we order cases by probability so the most likely case will be
12178     // checked first. However, two clusters can have the same probability in
12179     // which case their relative ordering is non-deterministic. So we use Low
12180     // as a tie-breaker as clusters are guaranteed to never overlap.
12181     llvm::sort(W.FirstCluster, W.LastCluster + 1,
12182                [](const CaseCluster &a, const CaseCluster &b) {
12183       return a.Prob != b.Prob ?
12184              a.Prob > b.Prob :
12185              a.Low->getValue().slt(b.Low->getValue());
12186     });
12187 
12188     // Rearrange the case blocks so that the last one falls through if possible
12189     // without changing the order of probabilities.
12190     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
12191       --I;
12192       if (I->Prob > W.LastCluster->Prob)
12193         break;
12194       if (I->Kind == CC_Range && I->MBB == NextMBB) {
12195         std::swap(*I, *W.LastCluster);
12196         break;
12197       }
12198     }
12199   }
12200 
12201   // Compute total probability.
12202   BranchProbability DefaultProb = W.DefaultProb;
12203   BranchProbability UnhandledProbs = DefaultProb;
12204   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
12205     UnhandledProbs += I->Prob;
12206 
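        // Lower each cluster in order. Every iteration emits the check for one
        // cluster and falls through to the next one, or to the default
        // destination after the last cluster.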
12207   MachineBasicBlock *CurMBB = W.MBB;
12208   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
12209     bool FallthroughUnreachable = false;
12210     MachineBasicBlock *Fallthrough;
12211     if (I == W.LastCluster) {
12212       // For the last cluster, fall through to the default destination.
12213       Fallthrough = DefaultMBB;
12214       FallthroughUnreachable = isa<UnreachableInst>(
12215           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
12216     } else {
12217       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
12218       CurMF->insert(BBI, Fallthrough);
12219       // Put Cond in a virtual register to make it available from the new blocks.
12220       ExportFromCurrentBlock(Cond);
12221     }
12222     UnhandledProbs -= I->Prob;
12223 
12224     switch (I->Kind) {
12225       case CC_JumpTable: {
12226         // FIXME: Optimize away range check based on pivot comparisons.
12227         JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
12228         SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
12229 
12230         // The jump block hasn't been inserted yet; insert it here.
12231         MachineBasicBlock *JumpMBB = JT->MBB;
12232         CurMF->insert(BBI, JumpMBB);
12233 
12234         auto JumpProb = I->Prob;
12235         auto FallthroughProb = UnhandledProbs;
12236 
12237         // If the default statement is a target of the jump table, we evenly
12238         // distribute the default probability to successors of CurMBB. Also
12239         // update the probability on the edge from JumpMBB to Fallthrough.
12240         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
12241                                               SE = JumpMBB->succ_end();
12242              SI != SE; ++SI) {
12243           if (*SI == DefaultMBB) {
12244             JumpProb += DefaultProb / 2;
12245             FallthroughProb -= DefaultProb / 2;
12246             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
12247             JumpMBB->normalizeSuccProbs();
12248             break;
12249           }
12250         }
12251 
12252         // If the default clause is unreachable, propagate that knowledge into
12253         // JTH->FallthroughUnreachable which will use it to suppress the range
12254         // check.
12255         //
12256         // However, don't do this if we're doing branch target enforcement,
12257         // because a table branch _without_ a range check can be a tempting JOP
12258         // gadget - out-of-bounds inputs that are impossible in correct
12259         // execution become possible again if an attacker can influence the
12260         // control flow. So if an attacker doesn't already have a BTI bypass
12261         // available, we don't want them to be able to get one out of this
12262         // table branch.
12263         if (FallthroughUnreachable) {
12264           Function &CurFunc = CurMF->getFunction();
12265           if (!CurFunc.hasFnAttribute("branch-target-enforcement"))
12266             JTH->FallthroughUnreachable = true;
12267         }
12268 
12269         if (!JTH->FallthroughUnreachable)
12270           addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12271         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12272         CurMBB->normalizeSuccProbs();
12273 
12274         // The jump table header will be inserted in our current block, do the
12275         // range check, and fall through to our fallthrough block.
12276         JTH->HeaderBB = CurMBB;
12277         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
12278 
12279         // If we're in the right place, emit the jump table header right now.
12280         if (CurMBB == SwitchMBB) {
12281           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
12282           JTH->Emitted = true;
12283         }
12284         break;
12285       }
12286       case CC_BitTests: {
12287         // FIXME: Optimize away range check based on pivot comparisons.
12288         BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
12289 
12290         // The bit test blocks haven't been inserted yet; insert them here.
12291         for (BitTestCase &BTC : BTB->Cases)
12292           CurMF->insert(BBI, BTC.ThisBB);
12293 
12294         // Fill in fields of the BitTestBlock.
12295         BTB->Parent = CurMBB;
12296         BTB->Default = Fallthrough;
12297 
12298         BTB->DefaultProb = UnhandledProbs;
12299         // If the cases in the bit test don't form a contiguous range, we evenly
12300         // distribute the probability on the edge to Fallthrough to two
12301         // successors of CurMBB.
12302         if (!BTB->ContiguousRange) {
12303           BTB->Prob += DefaultProb / 2;
12304           BTB->DefaultProb -= DefaultProb / 2;
12305         }
12306 
12307         if (FallthroughUnreachable)
12308           BTB->FallthroughUnreachable = true;
12309 
12310         // If we're in the right place, emit the bit test header right now.
12311         if (CurMBB == SwitchMBB) {
12312           visitBitTestHeader(*BTB, SwitchMBB);
12313           BTB->Emitted = true;
12314         }
12315         break;
12316       }
12317       case CC_Range: {
12318         const Value *RHS, *LHS, *MHS;
12319         ISD::CondCode CC;
12320         if (I->Low == I->High) {
12321           // Check Cond == I->Low.
12322           CC = ISD::SETEQ;
12323           LHS = Cond;
12324           RHS = I->Low;
12325           MHS = nullptr;
12326         } else {
12327           // Check I->Low <= Cond <= I->High.
12328           CC = ISD::SETLE;
12329           LHS = I->Low;
12330           MHS = Cond;
12331           RHS = I->High;
12332         }
12333 
12334         // If Fallthrough is unreachable, fold away the comparison.
12335         if (FallthroughUnreachable)
12336           CC = ISD::SETTRUE;
12337 
12338         // The false probability is the sum of all unhandled cases.
12339         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
12340                      getCurSDLoc(), I->Prob, UnhandledProbs);
12341 
12342         if (CurMBB == SwitchMBB)
12343           visitSwitchCase(CB, SwitchMBB);
12344         else
12345           SL->SwitchCases.push_back(CB);
12346 
12347         break;
12348       }
12349     }
12350     CurMBB = Fallthrough;
12351   }
12352 }
12353 
12354 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
12355                                         const SwitchWorkListItem &W,
12356                                         Value *Cond,
12357                                         MachineBasicBlock *SwitchMBB) {
12358   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
12359          "Clusters not sorted?");
12360   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
12361 
12362   auto [LastLeft, FirstRight, LeftProb, RightProb] =
12363       SL->computeSplitWorkItemInfo(W);
12364 
12365   // Use the first element on the right as pivot since we will make less-than
12366   // comparisons against it.
12367   CaseClusterIt PivotCluster = FirstRight;
12368   assert(PivotCluster > W.FirstCluster);
12369   assert(PivotCluster <= W.LastCluster);
12370 
12371   CaseClusterIt FirstLeft = W.FirstCluster;
12372   CaseClusterIt LastRight = W.LastCluster;
12373 
12374   const ConstantInt *Pivot = PivotCluster->Low;
12375 
12376   // New blocks will be inserted immediately after the current one.
12377   MachineFunction::iterator BBI(W.MBB);
12378   ++BBI;
12379 
12380   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
12381   // we can branch to its destination directly if it's squeezed exactly in
12382   // between the known lower bound and Pivot - 1.
12383   MachineBasicBlock *LeftMBB;
12384   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
12385       FirstLeft->Low == W.GE &&
12386       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
12387     LeftMBB = FirstLeft->MBB;
12388   } else {
12389     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12390     FuncInfo.MF->insert(BBI, LeftMBB);
12391     WorkList.push_back(
12392         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
12393     // Put Cond in a virtual register to make it available from the new blocks.
12394     ExportFromCurrentBlock(Cond);
12395   }
12396 
12397   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
12398   // single cluster, RHS.Low == Pivot, and we can branch to its destination
12399   // directly if RHS.High equals the current upper bound.
12400   MachineBasicBlock *RightMBB;
12401   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
12402       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
12403     RightMBB = FirstRight->MBB;
12404   } else {
12405     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12406     FuncInfo.MF->insert(BBI, RightMBB);
12407     WorkList.push_back(
12408         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
12409     // Put Cond in a virtual register to make it available from the new blocks.
12410     ExportFromCurrentBlock(Cond);
12411   }
12412 
12413   // Create the CaseBlock record that will be used to lower the branch.
12414   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
12415                getCurSDLoc(), LeftProb, RightProb);
12416 
12417   if (W.MBB == SwitchMBB)
12418     visitSwitchCase(CB, SwitchMBB);
12419   else
12420     SL->SwitchCases.push_back(CB);
12421 }
12422 
12423 // Scale CaseProb after peeling a case with probability PeeledCaseProb from
12424 // the switch statement.
12425 static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
12426                                             BranchProbability PeeledCaseProb) {
12427   if (PeeledCaseProb == BranchProbability::getOne())
12428     return BranchProbability::getZero();
12429   BranchProbability SwitchProb = PeeledCaseProb.getCompl();
12430 
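        // Each remaining case is scaled by 1 / (1 - PeeledCaseProb), i.e.
        // NewProb = CaseProb / SwitchProb, preserving the relative weights of the
        // unpeeled cases. The std::max below keeps rounding from producing a
        // probability greater than one.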
12431   uint32_t Numerator = CaseProb.getNumerator();
12432   uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
12433   return BranchProbability(Numerator, std::max(Numerator, Denominator));
12434 }
12435 
12436 // Try to peel the top probability case if it exceeds the threshold.
12437 // Return current MachineBasicBlock for the switch statement if the peeling
12438 // does not occur.
12439 // If the peeling is performed, return the newly created MachineBasicBlock
12440 // for the peeled switch statement. Also update Clusters to remove the peeled
12441 // case. PeeledCaseProb is the BranchProbability for the peeled case.
12442 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
12443     const SwitchInst &SI, CaseClusterVector &Clusters,
12444     BranchProbability &PeeledCaseProb) {
12445   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
12446   // Don't perform if there is only one cluster or optimizing for size.
12447   if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
12448       TM.getOptLevel() == CodeGenOptLevel::None ||
12449       SwitchMBB->getParent()->getFunction().hasMinSize())
12450     return SwitchMBB;
12451 
12452   BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
12453   unsigned PeeledCaseIndex = 0;
12454   bool SwitchPeeled = false;
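        // Find the cluster with the highest probability at or above the peeling
        // threshold (SwitchPeelThreshold percent); only that cluster is peeled.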
12455   for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
12456     CaseCluster &CC = Clusters[Index];
12457     if (CC.Prob < TopCaseProb)
12458       continue;
12459     TopCaseProb = CC.Prob;
12460     PeeledCaseIndex = Index;
12461     SwitchPeeled = true;
12462   }
12463   if (!SwitchPeeled)
12464     return SwitchMBB;
12465 
12466   LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
12467                     << TopCaseProb << "\n");
12468 
12469   // Record the MBB for the peeled switch statement.
12470   MachineFunction::iterator BBI(SwitchMBB);
12471   ++BBI;
12472   MachineBasicBlock *PeeledSwitchMBB =
12473       FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
12474   FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12475 
12476   ExportFromCurrentBlock(SI.getCondition());
12477   auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12478   SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12479                           nullptr,   nullptr,      TopCaseProb.getCompl()};
12480   lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12481 
12482   Clusters.erase(PeeledCaseIt);
12483   for (CaseCluster &CC : Clusters) {
12484     LLVM_DEBUG(
12485         dbgs() << "Scale the probability for one cluster, before scaling: "
12486                << CC.Prob << "\n");
12487     CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
12488     LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
12489   }
12490   PeeledCaseProb = TopCaseProb;
12491   return PeeledSwitchMBB;
12492 }
12493 
12494 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
12495   // Extract cases from the switch.
12496   BranchProbabilityInfo *BPI = FuncInfo.BPI;
12497   CaseClusterVector Clusters;
12498   Clusters.reserve(SI.getNumCases());
12499   for (auto I : SI.cases()) {
12500     MachineBasicBlock *Succ = FuncInfo.getMBB(I.getCaseSuccessor());
12501     const ConstantInt *CaseVal = I.getCaseValue();
12502     BranchProbability Prob =
12503         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
12504             : BranchProbability(1, SI.getNumCases() + 1);
12505     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
12506   }
12507 
12508   MachineBasicBlock *DefaultMBB = FuncInfo.getMBB(SI.getDefaultDest());
12509 
12510   // Cluster adjacent cases with the same destination. We do this at all
12511   // optimization levels because it's cheap to do and will make codegen faster
12512   // if there are many clusters.
12513   sortAndRangeify(Clusters);
12514 
12515   // The branch probability of the peeled case.
12516   BranchProbability PeeledCaseProb = BranchProbability::getZero();
12517   MachineBasicBlock *PeeledSwitchMBB =
12518       peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12519 
12520   // If there is only the default destination, jump there directly.
12521   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
12522   if (Clusters.empty()) {
12523     assert(PeeledSwitchMBB == SwitchMBB);
12524     SwitchMBB->addSuccessor(DefaultMBB);
12525     if (DefaultMBB != NextBlock(SwitchMBB)) {
12526       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
12527                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
12528     }
12529     return;
12530   }
12531 
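        // Try to absorb runs of clusters into jump tables and bit-test blocks
        // where profitable; clusters that are not absorbed remain plain ranges.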
12532   SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
12533                      DAG.getBFI());
12534   SL->findBitTestClusters(Clusters, &SI);
12535 
12536   LLVM_DEBUG({
12537     dbgs() << "Case clusters: ";
12538     for (const CaseCluster &C : Clusters) {
12539       if (C.Kind == CC_JumpTable)
12540         dbgs() << "JT:";
12541       if (C.Kind == CC_BitTests)
12542         dbgs() << "BT:";
12543 
12544       C.Low->getValue().print(dbgs(), true);
12545       if (C.Low != C.High) {
12546         dbgs() << '-';
12547         C.High->getValue().print(dbgs(), true);
12548       }
12549       dbgs() << ' ';
12550     }
12551     dbgs() << '\n';
12552   });
12553 
12554   assert(!Clusters.empty());
12555   SwitchWorkList WorkList;
12556   CaseClusterIt First = Clusters.begin();
12557   CaseClusterIt Last = Clusters.end() - 1;
12558   auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12559   // Scale the branch probability for DefaultMBB if peeling occurred and
12560   // DefaultMBB is not replaced.
12561   if (PeeledCaseProb != BranchProbability::getZero() &&
12562       DefaultMBB == FuncInfo.getMBB(SI.getDefaultDest()))
12563     DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
12564   WorkList.push_back(
12565       {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
12566 
12567   while (!WorkList.empty()) {
12568     SwitchWorkListItem W = WorkList.pop_back_val();
12569     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
12570 
12571     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
12572         !DefaultMBB->getParent()->getFunction().hasMinSize()) {
12573       // For optimized builds, lower large range as a balanced binary tree.
12574       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
12575       continue;
12576     }
12577 
12578     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
12579   }
12580 }
12581 
12582 void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
12583   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12584   auto DL = getCurSDLoc();
12585   EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12586   setValue(&I, DAG.getStepVector(DL, ResultVT));
12587 }
12588 
12589 void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
12590   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12591   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12592 
12593   SDLoc DL = getCurSDLoc();
12594   SDValue V = getValue(I.getOperand(0));
12595   assert(VT == V.getValueType() && "Malformed vector.reverse!");
12596 
12597   if (VT.isScalableVector()) {
12598     setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
12599     return;
12600   }
12601 
12602   // Use VECTOR_SHUFFLE for the fixed-length vector
12603   // to maintain existing behavior.
12604   SmallVector<int, 8> Mask;
12605   unsigned NumElts = VT.getVectorMinNumElements();
12606   for (unsigned i = 0; i != NumElts; ++i)
12607     Mask.push_back(NumElts - 1 - i);
12608 
12609   setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
12610 }
12611 
12612 void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I) {
12613   auto DL = getCurSDLoc();
12614   SDValue InVec = getValue(I.getOperand(0));
12615   EVT OutVT =
12616       InVec.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
12617 
12618   unsigned OutNumElts = OutVT.getVectorMinNumElements();
12619 
12620   // ISD Node needs the input vectors split into two equal parts
12621   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
12622                            DAG.getVectorIdxConstant(0, DL));
12623   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
12624                            DAG.getVectorIdxConstant(OutNumElts, DL));
12625 
12626   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
12627   // legalisation and combines.
12628   if (OutVT.isFixedLengthVector()) {
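          // A stride-2 mask starting at 0 gathers the even lanes and one starting
          // at 1 gathers the odd lanes, e.g. <0, 2, 4, 6> and <1, 3, 5, 7> when
          // OutNumElts is 4.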
12629     SDValue Even = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
12630                                         createStrideMask(0, 2, OutNumElts));
12631     SDValue Odd = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
12632                                        createStrideMask(1, 2, OutNumElts));
12633     SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
12634     setValue(&I, Res);
12635     return;
12636   }
12637 
12638   SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
12639                             DAG.getVTList(OutVT, OutVT), Lo, Hi);
12640   setValue(&I, Res);
12641 }
12642 
12643 void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I) {
12644   auto DL = getCurSDLoc();
12645   SDValue InVec0 = getValue(I.getOperand(0));
12646   SDValue InVec1 = getValue(I.getOperand(1));
12647   EVT InVT = InVec0.getValueType();
12648   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12649   EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12650 
12651   // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
12652   // legalisation and combines.
12653   if (OutVT.isFixedLengthVector()) {
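          // createInterleaveMask(NumElts, 2) alternates the lanes of the two
          // inputs, e.g. <0, 4, 1, 5, 2, 6, 3, 7> when each input has four
          // elements.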
12654     unsigned NumElts = InVT.getVectorMinNumElements();
12655     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVec0, InVec1);
12656     setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
12657                                       createInterleaveMask(NumElts, 2)));
12658     return;
12659   }
12660 
12661   SDValue Res = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
12662                             DAG.getVTList(InVT, InVT), InVec0, InVec1);
12663   Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Res.getValue(0),
12664                     Res.getValue(1));
12665   setValue(&I, Res);
12666 }
12667 
12668 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
12669   SmallVector<EVT, 4> ValueVTs;
12670   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
12671                   ValueVTs);
12672   unsigned NumValues = ValueVTs.size();
12673   if (NumValues == 0) return;
12674 
12675   SmallVector<SDValue, 4> Values(NumValues);
12676   SDValue Op = getValue(I.getOperand(0));
12677 
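        // Freeze each member of an aggregate result individually, then re-merge
        // the frozen values below.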
12678   for (unsigned i = 0; i != NumValues; ++i)
12679     Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
12680                             SDValue(Op.getNode(), Op.getResNo() + i));
12681 
12682   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12683                            DAG.getVTList(ValueVTs), Values));
12684 }
12685 
12686 void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
12687   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12688   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
12689 
12690   SDLoc DL = getCurSDLoc();
12691   SDValue V1 = getValue(I.getOperand(0));
12692   SDValue V2 = getValue(I.getOperand(1));
12693   int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
12694 
12695   // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
12696   if (VT.isScalableVector()) {
12697     setValue(
12698         &I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
12699                         DAG.getSignedConstant(
12700                             Imm, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));
12701     return;
12702   }
12703 
12704   unsigned NumElts = VT.getVectorNumElements();
12705 
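        // A negative Imm selects the trailing -Imm elements of V1, so translate
        // it into the equivalent non-negative start index, e.g. NumElts == 4 and
        // Imm == -1 give Idx == 3 and the mask <3, 4, 5, 6>.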
12706   uint64_t Idx = (NumElts + Imm) % NumElts;
12707 
12708   // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
12709   SmallVector<int, 8> Mask;
12710   for (unsigned i = 0; i < NumElts; ++i)
12711     Mask.push_back(Idx + i);
12712   setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
12713 }
12714 
12715 // Consider the following MIR after SelectionDAG, which produces output in
12716 // physregs in the first case or virtregs in the second case.
12717 //
12718 // INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
12719 // %5:gr32 = COPY $ebx
12720 // %6:gr32 = COPY $edx
12721 // %1:gr32 = COPY %6:gr32
12722 // %0:gr32 = COPY %5:gr32
12723 //
12724 // INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
12725 // %1:gr32 = COPY %6:gr32
12726 // %0:gr32 = COPY %5:gr32
12727 //
12728 // Given %0, we'd like to return $ebx in the first case and %5 in the second.
12729 // Given %1, we'd like to return $edx in the first case and %6 in the second.
12730 //
12731 // If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
12732 // to a single virtreg (such as %0). The remaining outputs monotonically
12733 // increase in virtreg number from there. If a callbr has no outputs, then it
12734 // should not have a corresponding callbr landingpad; in fact, the callbr
12735 // landingpad would not even be able to refer to such a callbr.
12736 static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) {
12737   MachineInstr *MI = MRI.def_begin(Reg)->getParent();
12738   // There is definitely at least one copy.
12739   assert(MI->getOpcode() == TargetOpcode::COPY &&
12740          "start of copy chain MUST be COPY");
12741   Reg = MI->getOperand(1).getReg();
12742   MI = MRI.def_begin(Reg)->getParent();
12743   // There may be a second copy, from a physical register into a virtual one.
12744   if (MI->getOpcode() == TargetOpcode::COPY) {
12745     assert(Reg.isVirtual() && "expected COPY of virtual register");
12746     Reg = MI->getOperand(1).getReg();
12747     assert(Reg.isPhysical() && "expected COPY of physical register");
12748     MI = MRI.def_begin(Reg)->getParent();
12749   }
12750   // The end of the copy chain must be the defining INLINEASM_BR.
12751   assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12752          "end of copy chain MUST be INLINEASM_BR");
12753   return Reg;
12754 }
12755 
12756 // We must do this walk rather than the simpler
12757 //   setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
12758 // otherwise we will end up with copies of virtregs only valid along direct
12759 // edges.
12760 void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
12761   SmallVector<EVT, 8> ResultVTs;
12762   SmallVector<SDValue, 8> ResultValues;
12763   const auto *CBR =
12764       cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());
12765 
12766   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12767   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
12768   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
12769 
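        // The callbr's outputs occupy consecutive virtual registers starting with
        // the one recorded in the ValueMap (see the comment above
        // FollowCopyChain).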
12770   unsigned InitialDef = FuncInfo.ValueMap[CBR];
12771   SDValue Chain = DAG.getRoot();
12772 
12773   // Re-parse the asm constraints string.
12774   TargetLowering::AsmOperandInfoVector TargetConstraints =
12775       TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
12776   for (auto &T : TargetConstraints) {
12777     SDISelAsmOperandInfo OpInfo(T);
12778     if (OpInfo.Type != InlineAsm::isOutput)
12779       continue;
12780 
12781     // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the
12782     // individual constraint.
12783     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
12784 
12785     switch (OpInfo.ConstraintType) {
12786     case TargetLowering::C_Register:
12787     case TargetLowering::C_RegisterClass: {
12788       // Fill in OpInfo.AssignedRegs.Regs.
12789       getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);
12790 
12791       // getRegistersForValue may produce one or more registers, depending on
12792       // whether OpInfo.ConstraintVT is legal on the target.
12793       for (Register &Reg : OpInfo.AssignedRegs.Regs) {
12794         Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
12795         if (OriginalDef.isPhysical())
12796           FuncInfo.MBB->addLiveIn(OriginalDef);
12797         // Update the assigned registers to use the original defs.
12798         Reg = OriginalDef;
12799       }
12800 
12801       SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12802           DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
12803       ResultValues.push_back(V);
12804       ResultVTs.push_back(OpInfo.ConstraintVT);
12805       break;
12806     }
12807     case TargetLowering::C_Other: {
12808       SDValue Flag;
12809       SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
12810                                                   OpInfo, DAG);
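              // Skip the virtual register corresponding to this output.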
12811       ++InitialDef;
12812       ResultValues.push_back(V);
12813       ResultVTs.push_back(OpInfo.ConstraintVT);
12814       break;
12815     }
12816     default:
12817       break;
12818     }
12819   }
12820   SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12821                           DAG.getVTList(ResultVTs), ResultValues);
12822   setValue(&I, V);
12823 }
12824